# HailoRT #
-HailoRT is a light-weight and production-grade run-time library, which runs on the host processor, and
-implements a robust user-space run-time library (HailoRT Library) responsible for operating a Hailo device, with intuitive APIs in C/C++ for optimized performance.
+HailoRT is a lightweight, production-grade runtime library that runs on the host processor and provides a robust
+user-space runtime library (the HailoRT Library) with intuitive APIs in C/C++ for optimized performance.
-HailoRT is comprised of the following main components:
+HailoRT consists of the following main components:
- HailoRT Library.
-- HailoRT CLI - command line application used to control the Hailo device, run inference using the device,
- collect inference statistics and device events, etc.
-- [**HailoRT PCIe Driver**](https://github.com/hailo-ai/hailort-drivers) - the device driver used to manage the Hailo device, communicate with the device and transfer
- data to/from the device. The PCIe driver includes the Hailo-8 firmware that runs on the Hailo device, manages the boot and control of the Hailo device.
-- pyHailoRT - HailoRT Python API (wraps the run-time library)
+- HailoRT CLI - a command line application used to control the Hailo device, run inferences, collect statistics and device events, etc.
+- [**HailoRT PCIe Driver**](https://github.com/hailo-ai/hailort-drivers) - the device driver used to manage the Hailo device, communicate with the device,
+and transfer data to/from the device; it includes the Hailo-8 firmware that runs on the Hailo device and manages its boot and control.
+- pyHailoRT - HailoRT Python API, which wraps the runtime library.
- HailoRT GStreamer element (HailoNet).
-HailoRT supports Linux and Windows, and can be compiled from sources to be integrated with various x86 and ARM processors.
+HailoRT supports Linux and Windows, and it can be compiled from sources to be integrated with various x86 and ARM processors.
## Usage
#define CONTROL_PROTOCOL__SOC_ID_LENGTH (32)
#define CONTROL_PROTOCOL__MAX_CFG_CHANNELS (4)
#define CONTROL_PROTOCOL__MAX_NETWORKS_PER_NETWORK_GROUP (8)
+#define CONTROL_PROTOCOL__MAX_VDMA_CHANNELS_PER_ENGINE (32)
#define CONTROL_PROTOCOL__MAX_VDMA_ENGINES_COUNT (3)
+#define CONTROL_PROTOCOL__MAX_TOTAL_CHANNEL_COUNT \
+ (CONTROL_PROTOCOL__MAX_VDMA_CHANNELS_PER_ENGINE * CONTROL_PROTOCOL__MAX_VDMA_ENGINES_COUNT)
/* Tightly coupled with the sizeof PROCESS_MONITOR__detection_results_t
and HAILO_SOC_PM_VALUES_BYTES_LENGTH */
#define PM_RESULTS_LENGTH (24)
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_CLEAR_CONFIGURED_APPS, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_GET_HW_CONSTS, false, CPU_ID_CORE_CPU)\
CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, false, CPU_ID_APP_CPU)\
+ CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS, false, CPU_ID_CORE_CPU)\
typedef enum {
#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) name,
CONTROL_PROTOCOL__HAILO8_A0 = 0,
CONTROL_PROTOCOL__HAILO8,
CONTROL_PROTOCOL__HAILO8L,
- CONTROL_PROTOCOL__MERCURY_CA,
- CONTROL_PROTOCOL__MERCURY_VPU,
+ CONTROL_PROTOCOL__HAILO15,
/* Must be last!! */
CONTROL_PROTOCOL__DEVICE_ARCHITECTURE_COUNT
} CONTROL_PROTOCOL__device_architecture_t;
CONTROL_PROTOCOL__INFER_FEATURE_LIST_t infer_features;
CONTROL_PROTOCOL__VALIDATION_FEATURE_LIST_t validation_features;
uint8_t networks_count;
+ uint16_t csm_buffer_size;
uint16_t batch_size[CONTROL_PROTOCOL__MAX_NETWORKS_PER_NETWORK_GROUP];
+ uint32_t boundary_channels_bitmap[CONTROL_PROTOCOL__MAX_VDMA_ENGINES_COUNT];
} CONTROL_PROTOCOL__application_header_t;
typedef struct {
uint8_t buffer_type; // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
uint64_t dma_address;
uint16_t desc_page_size;
- uint32_t total_desc_count;
+    uint32_t total_desc_count; //HRT-9913 - Some descriptors may not be initialized (to save space), we need to
+ // change this param or add another one for validation.
uint32_t bytes_in_pattern;
} CONTROL_PROTOCOL__host_buffer_info_t;
uint8_t is_last_control_per_context;
uint32_t context_type_length;
uint8_t context_type; // CONTROL_PROTOCOL__context_switch_context_type_t
- uint32_t actions_count_length;
- uint32_t actions_count;
uint32_t context_network_data_length;
uint8_t context_network_data[0];
} CONTROL_PROTOCOL__context_switch_set_context_info_request_t;
#pragma warning(pop)
#endif
-typedef struct {
- /* Must be first */
- uint8_t action_type; // CONTEXT_SWITCH_DEFS__ACTION_TYPE_t
- bool is_repeated;
-} CONTROL_PROTOCOL__ACTION_HEADER_t;
typedef CONTROL_PROTOCOL__read_memory_request_t CONTROL_PROTOCOL__read_user_config_request_t;
typedef CONTROL_PROTOCOL__read_memory_response_t CONTROL_PROTOCOL__read_user_config_response_t;
CONTROL_PROTOCOL__hw_consts_t hw_consts;
} CONTROL_PROTOCOL__get_hw_consts_response_t;
+/* TODO HRT-9545 - Return and hw only parse results */
+typedef struct {
+ bool infer_done;
+ uint32_t infer_cycles;
+} CONTROL_PROTOCOL__hw_only_infer_results_t;
+
+typedef struct {
+ uint32_t results_length;
+ CONTROL_PROTOCOL__hw_only_infer_results_t results;
+} CONTROL_PROTOCOL__change_hw_infer_status_response_t;
+
+typedef struct {
+ uint8_t channel_index;
+ uint8_t engine_index;
+ uint16_t desc_programed;
+} CONTROL_PROTOCOL__hw_infer_channel_info_t;
+
+typedef struct {
+ CONTROL_PROTOCOL__hw_infer_channel_info_t channel_info[CONTROL_PROTOCOL__MAX_TOTAL_CHANNEL_COUNT];
+ uint8_t channel_count;
+} CONTROL_PROTOCOL__hw_infer_channels_info_t;
+
+typedef enum {
+ CONTROL_PROTOCOL__HW_INFER_STATE_START,
+ CONTROL_PROTOCOL__HW_INFER_STATE_STOP,
+
+    /* Must be last!! */
+ CONTROL_PROTOCOL__HW_INFER_STATE_COUNT
+} CONTROL_PROTOCOL__hw_infer_state_t;
+
+typedef struct {
+ uint32_t hw_infer_state_length;
+ uint8_t hw_infer_state;
+ uint32_t application_index_length;
+ uint8_t application_index;
+ uint32_t dynamic_batch_size_length;
+ uint16_t dynamic_batch_size;
+ uint32_t channels_info_length;
+ CONTROL_PROTOCOL__hw_infer_channels_info_t channels_info;
+} CONTROL_PROTOCOL__change_hw_infer_status_request_t;
+
typedef union {
CONTROL_PROTOCOL_identify_response_t identity_response;
CONTROL_PROTOCOL__core_identify_response_t core_identity_response;
CONTROL_PROTOCOL__get_throttling_state_response_t get_throttling_state_response;
CONTROL_PROTOCOL__get_overcurrent_state_response_t get_overcurrent_state_response;
CONTROL_PROTOCOL__get_hw_consts_response_t get_hw_consts_response;
+ CONTROL_PROTOCOL__change_hw_infer_status_response_t change_hw_infer_status_response;
// Note: This array is larger than any legal request:
// * Functions in this module won't write more than CONTROL_PROTOCOL__MAX_CONTROL_LENGTH bytes
CONTROL_PROTOCOL__sensor_set_i2c_bus_index_t sensor_set_i2c_bus_index;
CONTROL_PROTOCOL__set_overcurrent_state_request_t set_overcurrent_state_request;
CONTROL_PROTOCOL__set_sleep_state_request_t set_sleep_state_request;
+ CONTROL_PROTOCOL__change_hw_infer_status_request_t change_hw_infer_status_request;
// Note: This array is larger than any legal request:
// * Functions in this module won't write more than CONTROL_PROTOCOL__MAX_CONTROL_LENGTH bytes
// when recieving a pointer to CONTROL_PROTOCOL__request_parameters_t.
bool is_first_control_per_context;
bool is_last_control_per_context;
uint8_t context_type; // CONTROL_PROTOCOL__context_switch_context_type_t
- uint32_t actions_count;
uint32_t context_network_data_length;
uint8_t context_network_data[CONTROL_PROTOCOL__CONTEXT_NETWORK_DATA_SINGLE_CONTROL_MAX_SIZE];
} CONTROL_PROTOCOL__context_switch_context_info_single_control_t;
#include "utils.h"
#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
-#define FIRMWARE_HEADER_MAGIC_MERCURY (0xE905DAAB)
+#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
typedef enum {
FIRMWARE_HEADER_VERSION_INITIAL = 0,
typedef enum {
FIRMWARE_TYPE_HAILO8 = 0,
- FIRMWARE_TYPE_MERCURY
+ FIRMWARE_TYPE_HAILO15
} firmware_type_t;
#ifdef MERCURY
-#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_MERCURY)
+#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO15)
#elif defined(HAILO8_B0)
#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO8)
#endif /* MERCURY */
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_SET_SLEEP_STATE_FAILED)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_SLEEP_STATE_SIZE)\
FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_SLEEP_STATE)\
+ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_HW_INFER_STATE_LENGTH)\
+ FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CHANNELS_INFO_LENGTH)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__POWER_MEASUREMENT)\
FIRMWARE_STATUS__X(HAILO_POWER_MEASUREMENT_STATUS_POWER_INIT_ERROR)\
FIRMWARE_STATUS__X(CSM_CONFIG_MANAGER_STATUS_CSM_BURST_COUNTER_IS_NOT_ZERO)\
FIRMWARE_STATUS__X(CSM_CONFIG_MANAGER_STATUS_CSM_CREDIT_COUNTER_IS_NOT_ZERO)\
FIRMWARE_STATUS__X(CSM_CONFIG_MANAGER_STATUS_CSM_FIFO_NOT_EMPTY)\
+ FIRMWARE_STATUS__X(CSM_CONFIG_MANAGER_STATUS_INVALID_HOST_PAGE_SIZE)\
+ FIRMWARE_STATUS__X(CSM_CONFIG_MANAGER_STATUS_INVALID_BUFFER_SIZE)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__PCIE_CONFIG_MANAGER)\
FIRMWARE_STATUS__X(PCIE_CONFIG_MANAGER_STATUS_NOT_IMPLEMENTED)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_NOT_SUPPORTED_OPERATION)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_NETWORK_INDEX)\
FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_NMS_UNIT_INDEX)\
+ FIRMWARE_STATUS__X(NMS_MANAGER_STATUS_INVALID_BATCH_SIZE)\
\
FIRMWARE_MODULE__X(FIRMWARE_MODULE__CLUSTER_MANAGER)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_CLUSTER_INDEX)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_LCU_INDEX)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_ADDRESS)\
FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_RECEIVED_UNEXPECTED_INTERRUPT)\
+ \
+ FIRMWARE_MODULE__X(FIRMWARE_MODULE__HW_INFER_MANAGER)\
+ FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_NOT_CONFIGURED_BEFORE_INFER_START)\
+ FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_ALREADY_ACTIVATED)\
+ FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_STATE_MACHINE_NOT_IN_RESET_STATE_BEFORE_DEACTIVATE)\
+ FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_INVALID_STATE)\
typedef enum {
case FIRMWARE_TYPE_HAILO8:
firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
break;
- case FIRMWARE_TYPE_MERCURY:
- firmware_magic = FIRMWARE_HEADER_MAGIC_MERCURY;
+ case FIRMWARE_TYPE_HAILO15:
+ firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
break;
default:
status = HAILO_STATUS__FIRMWARE_HEADER_UTILS__INVALID_FIRMWARE_TYPE;
option(HAILO_BUILD_SERVICE "Build hailort service" OFF)
option(HAILO_BUILD_PROFILER "Build hailort profiler" ON)
-if(WIN32 AND ${HAILO_BUILD_SERVICE})
- message(FATAL_ERROR "HailoRT service is not supported on Windows")
-endif()
-
# Flag for emulator (FPGA/Veloce)
if(HAILO_BUILD_EMULATOR)
message(WARNING "HailoRT is building with Emulator flag on")
# Set firmware version
add_definitions( -DFIRMWARE_VERSION_MAJOR=4 )
-add_definitions( -DFIRMWARE_VERSION_MINOR=12 )
-add_definitions( -DFIRMWARE_VERSION_REVISION=1 )
+add_definitions( -DFIRMWARE_VERSION_MINOR=13 )
+add_definitions( -DFIRMWARE_VERSION_REVISION=0 )
if(HAILO_BUILD_SERVICE)
add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS )
endif()
if(NOT PYTHON_EXECUTABLE AND PYBIND11_PYTHON_VERSION)
# PYBIND11_PYTHON_VERSION is prioritized (not virtual environment) if PYTHON_EXECUTABLE is not set.
# See https://pybind11.readthedocs.io/en/stable/changelog.html#v2-6-0-oct-21-2020
- if(${CMAKE_VERSION} VERSION_LESS "3.22.0")
+ if((${CMAKE_VERSION} VERSION_LESS "3.22.0") AND (NOT WIN32))
find_package(PythonInterp ${PYBIND11_PYTHON_VERSION} REQUIRED)
set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
else()
${HAILORT_COMMON_OS_DIR}/filesystem.cpp
${HAILORT_COMMON_OS_DIR}/socket.cpp
${HAILORT_COMMON_OS_DIR}/process.cpp
+ ${HAILORT_COMMON_OS_DIR}/os_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/barrier.cpp
${CMAKE_CURRENT_SOURCE_DIR}/file_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/string_utils.cpp
+
+ ${CMAKE_CURRENT_SOURCE_DIR}/device_measurements.cpp
)
if(WIN32)
#include <thread>
#include <memory>
+#include "common/os_utils.hpp"
+
namespace hailort
{
template<typename T>
class AsyncThread final {
public:
- explicit AsyncThread(std::function<T(void)> func) :
+ AsyncThread(const std::string &name, std::function<T(void)> func) :
m_result(),
- m_thread([this, func]() {
+ m_thread([this, name, func]() {
+ if (!name.empty()) {
+ OsUtils::set_current_thread_name(name);
+ }
m_result = func();
})
{}
+ explicit AsyncThread(std::function<T(void)> func) : AsyncThread("", func)
+ {}
+
+ ~AsyncThread()
+ {
+        // Join on the thread. This can be a blocking operation, so to avoid it the user must call .get()
+        // before the object gets destructed (same behaviour as in std::future returned from std::async).
+ get();
+ }
+
/**
* NOTE! this object is not moveable by purpose, on creation we create a lambda that take `this`, if we
- * move the object `this` will change and the callback will be wrong. Use exeternal storage like std::unique_ptr
+ * move the object `this` will change and the callback will be wrong. Use external storage like std::unique_ptr
* to move the object (or to put it inside a container)
*/
AsyncThread(const AsyncThread<T> &) = delete;
#include "hailo/platform.h"
#include "common/utils.hpp"
#include <array>
+#include <iterator>
namespace hailort
{
MIN(CB_PROG((circbuf), (head), (tail)), (circbuf).size - (tail))
+template<typename T>
+struct is_std_array : public std::false_type {};
+
+template<typename T, std::size_t N>
+struct is_std_array<std::array<T, N>> : public std::true_type {};
+
// TODO: implement more functionalities, better move semantic handle
// TODO: support consts methods (front(), empty()), right now CB_* macros requires non const pointer to head+tail
-template<typename T>
+template<typename T, typename Container = std::vector<T>>
class CircularArray final
{
public:
- static_assert(std::is_pod<T>::value, "CircularArray can be used only with POD type");
+ static_assert(std::is_default_constructible<T>::value, "CircularArray object must be default constructible");
+
+ // Based on https://en.cppreference.com/w/cpp/iterator/iterator
+ class iterator: public std::iterator<std::input_iterator_tag, // iterator_category
+ T, // value_type
+ int, // difference_type
+ int, // pointer
+ T&> // reference
+ {
+ public:
+ explicit iterator(int index, CircularArray &array) : m_array(array), m_index(index) {}
+ iterator& operator++() { m_index = ((m_index + 1) & m_array.m_circ.size_mask); return *this; }
+ iterator operator++(int) { iterator retval = *this; ++(*this); return retval; }
+ bool operator==(iterator other) const { return m_index == other.m_index; }
+ bool operator!=(iterator other) const { return !(*this == other); }
+ T &operator*() const { return m_array.m_array[m_index]; }
+ private:
+ CircularArray &m_array;
+ int m_index;
+ };
+
+ // Ctor for Container=std::vector
+ template <typename C=Container,
+ class = typename std::enable_if_t<std::is_same<C, std::vector<T>>::value>>
CircularArray(size_t storage_size)
{
// storage size must be a power of 2
m_array.resize(storage_size);
}
+ // Ctor for Container=std::array
+ template <typename C=Container,
+ class = typename std::enable_if_t<is_std_array<C>::value>>
+ CircularArray(size_t storage_size, int = 0)
+ {
+ // storage size must be a power of 2
+ assert(is_powerof2(storage_size));
+ assert(storage_size <= std::tuple_size<C>::value);
+ CB_INIT(m_circ, storage_size);
+ }
+
+ void push_back(T &&element)
+ {
+ assert(!full());
+ m_array[CB_HEAD(m_circ)] = std::move(element);
+ CB_ENQUEUE(m_circ, 1);
+ }
+
void push_back(const T& element)
{
- assert(CB_AVAIL(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ)));
+ assert(!full());
m_array[CB_HEAD(m_circ)] = element;
CB_ENQUEUE(m_circ, 1);
}
void pop_front()
{
+ assert(!empty());
+ // Clear previous front
+ m_array[CB_TAIL(m_circ)] = T();
CB_DEQUEUE(m_circ, 1);
}
T &front()
{
+ assert(!empty());
return m_array[CB_TAIL(m_circ)];
}
+ void reset()
+ {
+ // pop all fronts to make sure all destructors are called.
+ // TODO: if T is std::is_trivial, we can just reset the counters
+ const auto original_size = size();
+ for (size_t i = 0 ; i < original_size; i++) {
+ pop_front();
+ }
+ }
+
bool empty()
{
return CB_HEAD(m_circ) == CB_TAIL(m_circ);
return CB_PROG(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
}
+ size_t capacity()
+ {
+ return CB_SIZE(m_circ) - 1;
+ }
+
+ iterator begin()
+ {
+ return iterator(CB_TAIL(m_circ), *this);
+ }
+
+ iterator end()
+ {
+ return iterator(CB_HEAD(m_circ), *this);
+ }
+
private:
circbuf_t m_circ;
- std::vector<T> m_array;
+ Container m_array;
};
} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file device_measurements.cpp
+ * @brief Measure temperature, power and current of Hailo chip
+ **/
+
+
+#include "common/device_measurements.hpp"
+#include "common/utils.hpp"
+
+using namespace hailort;
+
+constexpr std::chrono::milliseconds DEFAULT_MEASUREMENTS_INTERVAL(100);
+
+BaseMeasurement::BaseMeasurement(Device &device, hailo_status &status) :
+ m_device(device),
+ m_is_thread_running(false),
+ m_acc(make_shared_nothrow<FullAccumulator<double>>("BaseMeasurementAccumulator"))
+{
+ if (nullptr == m_acc) {
+ status = HAILO_OUT_OF_HOST_MEMORY;
+ return;
+ }
+ status = HAILO_SUCCESS;
+}
+
+BaseMeasurement::~BaseMeasurement()
+{
+ stop_measurement();
+}
+
+void BaseMeasurement::stop_measurement()
+{
+ m_is_thread_running = false;
+ if (m_thread.joinable()) {
+ m_thread.join();
+ }
+}
+
+AccumulatorResults BaseMeasurement::get_data()
+{
+ std::unique_lock<std::mutex> lock(m_mutex);
+ return m_acc->get();
+}
+
+Expected<std::shared_ptr<TemperatureMeasurement>> TemperatureMeasurement::create_shared(Device &device)
+{
+ auto status = HAILO_UNINITIALIZED;
+ auto ptr = make_shared_nothrow<TemperatureMeasurement>(device, status);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return ptr;
+}
+
+
+TemperatureMeasurement::TemperatureMeasurement(Device &device, hailo_status &status) : BaseMeasurement(device, status)
+{}
+
+hailo_status TemperatureMeasurement::start_measurement()
+{
+ // Checking sensor before starting thread
+ auto temp_info = m_device.get_chip_temperature();
+ CHECK_EXPECTED_AS_STATUS(temp_info);
+
+ m_is_thread_running = true;
+ m_thread = std::thread([this] () {
+ while (m_is_thread_running.load()) {
+ auto temp_info = m_device.get_chip_temperature();
+ if (HAILO_SUCCESS != temp_info.status()) {
+ LOGGER__ERROR("Failed to get chip's temperature, status = {}", temp_info.status());
+ m_is_thread_running = false;
+ break;
+ }
+
+ float32_t ts_avg = ((temp_info->ts0_temperature + temp_info->ts1_temperature) / 2);
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_acc->add_data_point(ts_avg, temp_info->sample_count);
+ }
+
+ std::this_thread::sleep_for(DEFAULT_MEASUREMENTS_INTERVAL);
+ }
+ });
+
+ return HAILO_SUCCESS;
+}
+
+Expected<std::shared_ptr<PowerMeasurement>> PowerMeasurement::create_shared(Device &device,
+ hailo_power_measurement_types_t measurement_type)
+{
+ auto status = HAILO_UNINITIALIZED;
+ auto ptr = make_shared_nothrow<PowerMeasurement>(device, measurement_type, status);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return ptr;
+}
+
+PowerMeasurement::PowerMeasurement(Device &device, hailo_power_measurement_types_t measurement_type, hailo_status &status)
+ : BaseMeasurement(device, status), m_measurement_type(measurement_type)
+{}
+
+hailo_status PowerMeasurement::start_measurement()
+{
+ // Checking sensor before starting thread
+ auto power_info = m_device.power_measurement(HAILO_DVM_OPTIONS_AUTO, m_measurement_type);
+ CHECK_EXPECTED_AS_STATUS(power_info);
+
+ m_is_thread_running = true;
+ m_thread = std::thread([this] () {
+ while (m_is_thread_running.load()) {
+ auto power_info = m_device.power_measurement(HAILO_DVM_OPTIONS_AUTO, m_measurement_type);
+ if (HAILO_SUCCESS != power_info.status()) {
+ LOGGER__ERROR("Failed to get chip's power, status = {}", power_info.status());
+ m_is_thread_running = false;
+ break;
+ }
+
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_acc->add_data_point(*power_info);
+ }
+
+ std::this_thread::sleep_for(DEFAULT_MEASUREMENTS_INTERVAL);
+ }
+ });
+
+ return HAILO_SUCCESS;
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file device_measurements.hpp
+ * @brief Measure temperature, power and current of Hailo chip
+ **/
+
+#ifndef _HAILO_DEVICE_MEASUREMENTS_HPP_
+#define _HAILO_DEVICE_MEASUREMENTS_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+
+#include "common/runtime_statistics_internal.hpp"
+
+#include <thread>
+#include <mutex>
+#include <atomic>
+
+
+class BaseMeasurement
+{
+public:
+ BaseMeasurement(hailort::Device &device, hailo_status &status);
+ virtual ~BaseMeasurement();
+
+ virtual hailo_status start_measurement() = 0;
+ void stop_measurement();
+ hailort::AccumulatorResults get_data();
+
+ virtual std::string measurement_unit() = 0;
+
+protected:
+ hailort::Device &m_device;
+ std::thread m_thread;
+ std::atomic_bool m_is_thread_running;
+ std::mutex m_mutex;
+ hailort::AccumulatorPtr m_acc;
+};
+
+
+class TemperatureMeasurement : public BaseMeasurement
+{
+public:
+ static hailort::Expected<std::shared_ptr<TemperatureMeasurement>> create_shared(hailort::Device &device);
+
+ virtual ~TemperatureMeasurement() = default;
+
+ virtual hailo_status start_measurement() override;
+
+ virtual std::string measurement_unit() override
+ {
+ return "C";
+ }
+
+ TemperatureMeasurement(hailort::Device &device, hailo_status &status);
+};
+
+
+class PowerMeasurement : public BaseMeasurement
+{
+public:
+ static hailort::Expected<std::shared_ptr<PowerMeasurement>> create_shared(hailort::Device &device,
+ hailo_power_measurement_types_t measurement_type);
+ virtual ~PowerMeasurement() = default;
+
+ virtual hailo_status start_measurement() override;
+
+ virtual std::string measurement_unit() override
+ {
+ switch (m_measurement_type) {
+ case HAILO_POWER_MEASUREMENT_TYPES__SHUNT_VOLTAGE:
+ case HAILO_POWER_MEASUREMENT_TYPES__BUS_VOLTAGE:
+ return "mV";
+ case HAILO_POWER_MEASUREMENT_TYPES__AUTO:
+ case HAILO_POWER_MEASUREMENT_TYPES__POWER:
+ return "W";
+ case HAILO_POWER_MEASUREMENT_TYPES__CURRENT:
+ return "mA";
+ default:
+ return "Nan";
+ };
+ }
+
+ PowerMeasurement(hailort::Device &device, hailo_power_measurement_types_t measurement_type,
+ hailo_status &status);
+
+private:
+ hailo_power_measurement_types_t m_measurement_type;
+};
+
+#endif /* _HAILO_DEVICE_MEASUREMENTS_HPP_ */
#ifndef __OS_ETHERNET_UTILS_H__
#define __OS_ETHERNET_UTILS_H__
-
+#include <string>
#include <hailo/hailort.h>
#include "hailo/expected.hpp"
if (m_latency_count == 0) {
return make_unexpected(HAILO_NOT_AVAILABLE);
}
-
+
duration latency = (m_latency_sum / m_latency_count);
if (clear) {
m_latency_sum = duration();
return;
}
- duration start = m_start_timestamps.front();
duration end(0);
for (auto &end_timesatmps : m_end_timestamps_per_channel) {
if (end_timesatmps.second.empty()) {
end = std::max(end, end_timesatmps.second.front());
}
+ duration start = m_start_timestamps.front();
+ assert(start <= end);
+
// calculate the latency
m_latency_sum += (end - start);
m_latency_count++;
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file os_utils.cpp
+ * @brief Utilities for Posix methods
+ **/
+
+#include "hailo/hailort.h"
+
+#include "common/os_utils.hpp"
+
+#include "spdlog/sinks/syslog_sink.h"
+
+
+namespace hailort
+{
+
+HailoRTOSLogger::HailoRTOSLogger()
+{
+ m_hailort_os_logger = spdlog::syslog_logger_mt("syslog", "hailort_service", LOG_PID);
+ m_hailort_os_logger->set_pattern("%v");
+ m_hailort_os_logger->set_level(spdlog::level::debug);
+}
+
+uint32_t OsUtils::get_curr_pid()
+{
+ return getpid();
+}
+
+CursorAdjustment::CursorAdjustment(){}
+CursorAdjustment::~CursorAdjustment(){}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file os_utils.cpp
+ * @brief Utilities for Windows methods
+ **/
+
+#include "common/os_utils.hpp"
+#include "hailo/hailort.h"
+
+#include <windows.h>
+#include "spdlog/sinks/win_eventlog_sink.h"
+
+namespace hailort
+{
+
+HailoRTOSLogger::HailoRTOSLogger()
+{
+ auto event_log_sink = std::make_shared<spdlog::sinks::win_eventlog_sink_mt>("hailort_service");
+ m_hailort_os_logger = std::make_shared<spdlog::logger>("eventlog", event_log_sink);
+ event_log_sink->set_pattern("%v");
+ m_hailort_os_logger->set_level(spdlog::level::debug);
+}
+
+uint32_t OsUtils::get_curr_pid()
+{
+ return static_cast<uint32_t>(GetCurrentProcessId());
+}
+
+CursorAdjustment::CursorAdjustment()
+{
+    // Enables Virtual Terminal Processing - enables ANSI Escape Sequences on Windows
+ // Source: https://stackoverflow.com/questions/52607960/how-can-i-enable-virtual-terminal-processing
+ HANDLE h_out = GetStdHandle(STD_OUTPUT_HANDLE);
+ DWORD dword_mode = 0;
+ GetConsoleMode(h_out, &dword_mode);
+ m_previous_output_buffer_mode = dword_mode;
+ dword_mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ SetConsoleMode(h_out, dword_mode);
+}
+
+CursorAdjustment::~CursorAdjustment()
+{
+ // Return to the original state
+ HANDLE h_out = GetStdHandle(STD_OUTPUT_HANDLE);
+    SetConsoleMode(h_out, m_previous_output_buffer_mode); // Return the output buffer mode to its original mode
+}
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file os_utils.hpp
+ * @brief Utilities for OS methods
+ **/
+
+#ifndef _HAILO_OS_UTILS_HPP_
+#define _HAILO_OS_UTILS_HPP_
+
+#include "hailo/hailort.h"
+
+#include "common/logger_macros.hpp"
+
+
+namespace hailort
+{
+
+class HailoRTOSLogger final
+{
+public:
+ static HailoRTOSLogger& get_instance()
+ {
+ static HailoRTOSLogger instance;
+ return instance;
+ }
+
+ std::shared_ptr<spdlog::logger> logger()
+ {
+ return m_hailort_os_logger;
+ }
+
+private:
+ HailoRTOSLogger();
+ std::shared_ptr<spdlog::logger> m_hailort_os_logger;
+};
+
+class CursorAdjustment final
+{
+public:
+ CursorAdjustment();
+ ~CursorAdjustment();
+private:
+#if defined(_WIN32)
+ unsigned int m_previous_output_buffer_mode;
+#endif /* _WIN32 */
+};
+
+#define _HAILORT_OS_LOG(level, ...) SPDLOG_LOGGER_CALL(hailort::HailoRTOSLogger::get_instance().logger(), level, __VA_ARGS__)
+#define HAILORT_OS_LOG_INFO(...) _HAILORT_OS_LOG(spdlog::level::info, __VA_ARGS__)
+#define HAILORT_OS_LOG_WARNNING(...) _HAILORT_OS_LOG(spdlog::level::warn, __VA_ARGS__)
+#define HAILORT_OS_LOG_ERROR(...) _HAILORT_OS_LOG(spdlog::level::err, __VA_ARGS__)
+
+class OsUtils final
+{
+public:
+ OsUtils() = delete;
+ static uint32_t get_curr_pid();
+
+ static void set_current_thread_name(const std::string &name)
+ {
+ (void)name;
+#ifndef NDEBUG
+#ifndef _WIN32
+ // pthread_setname_np name size is limited to 16 chars (including null terminator)
+ assert(name.size() < 16);
+ pthread_setname_np(pthread_self(), name.c_str());
+#else
+// TODO: implement for windows
+#endif /* _WIN32 */
+#endif /* NDEBUG */
+ }
+
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_OS_UTILS_HPP_ */
FullAccumulator &operator=(const FullAccumulator &) = delete;
virtual ~FullAccumulator() = default;
- virtual void add_data_point(T data) override
+ virtual void add_data_point(T data, uint32_t samples_count = 1) override
{
std::lock_guard<std::recursive_mutex> lock_guard(m_lock);
m_min = std::min(m_min, static_cast<double>(data));
m_max = std::max(m_max, static_cast<double>(data));
- m_count++;
+ m_count += samples_count;
// mean, variance, sd and mean_sd are calculated using Welford's_online_algorithm.
// See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
const auto delta = static_cast<double>(data) - m_mean;
- m_mean += delta / static_cast<double>(m_count);
+ m_mean += ((delta * samples_count )/ static_cast<double>(m_count));
m_M2 += delta * (static_cast<double>(data) - m_mean);
}
// data is a duration of time.
// However, the statistics collected will be in frames per seconds (i.e. time^-1).
- virtual void add_data_point(T data) override
+ virtual void add_data_point(T data, uint32_t samples_count = 1) override
{
assert(0 != data);
// Note: 'this' is needed to access protected members of a template base class
this->m_min = std::min(this->m_min, data_inverse);
this->m_max = std::max(this->m_max, data_inverse);
- this->m_count++;
+ this->m_count += samples_count;
// mean, variance, sd and mean_sd are calculated using Welford's_online_algorithm.
// See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
const auto delta = data_inverse - this->m_mean;
// We calculate the arithmatic mean
- this->m_mean = static_cast<double>(this->m_count) / static_cast<double>(m_sum);
+ this->m_mean = static_cast<double>(this->m_count * samples_count) / static_cast<double>(m_sum);
this->m_M2 += delta * (data_inverse - this->m_mean);
}
#define CHECK_SUCCESS_AS_EXPECTED(status, ...) _CHECK_SUCCESS_AS_EXPECTED(status, ISEMPTY(__VA_ARGS__), "" __VA_ARGS__)
#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#define _CHECK_SUCCESS_AS_RPC_STATUS(status, reply, is_default, fmt, ...) \
- do { \
- const auto &__check_success_status = (status); \
- if (__check_success_status != HAILO_SUCCESS) { \
- reply->set_status(static_cast<uint32_t>(__check_success_status)); \
- LOGGER__ERROR( \
- _CONSTRUCT_MSG(is_default, "CHECK_SUCCESS_AS_RPC_STATUS failed with status={}", fmt, __check_success_status, ##__VA_ARGS__) \
- ); \
- return grpc::Status::OK; \
- } \
+#define _CHECK_SUCCESS_AS_RPC_STATUS(status, reply, is_default, fmt, ...) \
+ do { \
+ const auto &__check_success_status = (status); \
+ reply->set_status(static_cast<uint32_t>(__check_success_status)); \
+ _CHECK( \
+ HAILO_SUCCESS == __check_success_status, \
+ grpc::Status::OK, \
+ _CONSTRUCT_MSG(is_default, "CHECK_SUCCESS_AS_RPC_STATUS failed with status={}", fmt, __check_success_status, ##__VA_ARGS__) \
+ ); \
} while(0)
#define CHECK_SUCCESS_AS_RPC_STATUS(status, reply, ...) _CHECK_SUCCESS_AS_RPC_STATUS(status, reply, ISEMPTY(__VA_ARGS__), "" __VA_ARGS__)
#define _HAILO_IOCTL_COMMON_H_
-#define DESCRIPTORS_IN_BUFFER(buffer_size, desc_page_size) (((buffer_size) + (desc_page_size) - 1) / (desc_page_size))
-
// This value is not easily changeable.
// For example: the channel interrupts ioctls assume we have up to 32 channels
#define MAX_VDMA_CHANNELS_PER_ENGINE (32)
#define CHANNEL_IRQ_TIMESTAMPS_SIZE (128 * 2) // Should be same as MAX_IRQ_TIMESTAMPS_SIZE (hailort_driver.hpp)
#define CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK (CHANNEL_IRQ_TIMESTAMPS_SIZE - 1)
-#define INVALID_CHANNEL_HANDLE_VALUE ((uint64_t)-1)
#define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
// Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW
uintptr_t desc_handle; // in
uint16_t desc_page_size; // in
uint8_t channel_index; // in
- size_t offset; // in
+ uint32_t starting_desc; // in
};
-/* structure used in ioctl HAILO_VDMA_CHANNEL_ENABLE */
-struct hailo_vdma_channel_enable_params {
- uint8_t engine_index; // in
- uint8_t channel_index; // in
- enum hailo_dma_data_direction direction; // in
- bool enable_timestamps_measure; // in
- uint64_t channel_handle; // out
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
+struct hailo_vdma_interrupts_enable_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
+ bool enable_timestamps_measure; // in
};
-/* structure used in ioctl HAILO_VDMA_CHANNEL_DISABLE */
-struct hailo_vdma_channel_disable_params {
- uint8_t engine_index; // in
- uint8_t channel_index; // in
- uint64_t channel_handle; // in
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
+struct hailo_vdma_interrupts_disable_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
};
-/* structure used in ioctl HAILO_VDMA_CHANNEL_WAIT_INT */
-struct hailo_vdma_channel_wait_params {
- uint8_t engine_index; // in
- uint8_t channel_index; // in
- uint64_t channel_handle; // in
- uint64_t timeout_ms; // in
- uint32_t timestamps_count; // inout
-// In linux send address to local buffer because there isnt room on stack for array
-#if defined(__linux__)
- struct hailo_channel_interrupt_timestamp *timestamps; // out
-#elif defined(__QNX__) || defined(_MSC_VER)
- struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE]; // out
-#else
-#error "unsupported platform!"
-#endif // __linux__
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_WAIT */
+struct hailo_vdma_interrupts_channel_data {
+ uint8_t engine_index;
+ uint8_t channel_index;
+    bool is_active; // If not active, num_processed is ignored.
+ uint16_t host_num_processed;
+ uint8_t host_error; // Channel errors bits on source side
+ uint8_t device_error; // Channel errors bits on dest side
};
-/* structure used in ioctl HAILO_VDMA_CHANNEL_ABORT */
-struct hailo_vdma_channel_abort_params {
- uint8_t engine_index; // in
- uint8_t channel_index; // in
- uint64_t channel_handle; // in
+struct hailo_vdma_interrupts_wait_params {
+ uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
+ uint8_t channels_count; // out
+ struct hailo_vdma_interrupts_channel_data
+ irq_data[MAX_VDMA_CHANNELS_PER_ENGINE * MAX_VDMA_ENGINES]; // out
};
-/* structure used in ioctl HAILO_VDMA_CHANNEL_CLEAR_ABORT */
-struct hailo_vdma_channel_clear_abort_params {
- uint8_t engine_index; // in
- uint8_t channel_index; // in
- uint64_t channel_handle; // in
+/* structure used in ioctl HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS */
+struct hailo_vdma_interrupts_read_timestamp_params {
+ uint8_t engine_index; // in
+ uint8_t channel_index; // in
+ uint32_t timestamps_count; // out
+ struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE]; // out
};
/* structure used in ioctl HAILO_FW_CONTROL */
};
struct hailo_vdma_buffer_sync_params {
- size_t handle; // in
- enum hailo_vdma_buffer_sync_type sync_type; // in
- void* buffer_address; // in
- uint64_t buffer_size; // in
+ size_t handle; // in
+ enum hailo_vdma_buffer_sync_type sync_type; // in
+ size_t offset; // in
+ size_t count; // in
};
/* structure used in ioctl HAILO_READ_NOTIFICATION */
enum hailo_board_type {
HAILO_BOARD_TYPE_HAILO8 = 0,
- HAILO_BOARD_TYPE_MERCURY,
+ HAILO_BOARD_TYPE_HAILO15,
HAILO_BOARD_TYPE_COUNT,
/** Max enum value to maintain ABI Integrity */
#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
enum hailo_vdma_ioctl_code {
- HAILO_VDMA_CHANNEL_ENABLE_CODE,
- HAILO_VDMA_CHANNEL_DISABLE_CODE,
- HAILO_VDMA_CHANNEL_WAIT_INT_CODE,
- HAILO_VDMA_CHANNEL_ABORT_CODE,
- HAILO_VDMA_CHANNEL_CLEAR_ABORT_CODE,
+ HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
+ HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
+ HAILO_VDMA_INTERRUPTS_WAIT_CODE,
+ HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
HAILO_VDMA_CHANNEL_READ_REGISTER_CODE,
HAILO_VDMA_CHANNEL_WRITE_REGISTER_CODE,
HAILO_VDMA_BUFFER_MAP_CODE,
HAILO_VDMA_IOCTL_MAX_NR,
};
-#define HAILO_VDMA_CHANNEL_ENABLE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_ENABLE_CODE, struct hailo_vdma_channel_enable_params)
-#define HAILO_VDMA_CHANNEL_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_DISABLE_CODE, struct hailo_vdma_channel_disable_params)
-#define HAILO_VDMA_CHANNEL_WAIT_INT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_WAIT_INT_CODE, struct hailo_vdma_channel_wait_params)
-#define HAILO_VDMA_CHANNEL_ABORT _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_ABORT_CODE, struct hailo_vdma_channel_abort_params)
-#define HAILO_VDMA_CHANNEL_CLEAR_ABORT _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_CLEAR_ABORT_CODE, struct hailo_vdma_channel_clear_abort_params)
-#define HAILO_VDMA_CHANNEL_READ_REGISTER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_READ_REGISTER_CODE, struct hailo_vdma_channel_read_register_params)
-#define HAILO_VDMA_CHANNEL_WRITE_REGISTER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_WRITE_REGISTER_CODE, struct hailo_vdma_channel_write_register_params)
+#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
+#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
+#define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
+#define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
+
+#define HAILO_VDMA_CHANNEL_READ_REGISTER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_READ_REGISTER_CODE, struct hailo_vdma_channel_read_register_params)
+#define HAILO_VDMA_CHANNEL_WRITE_REGISTER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CHANNEL_WRITE_REGISTER_CODE, struct hailo_vdma_channel_write_register_params)
-#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
-#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
-#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
+#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
+#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
+#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
-#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
-#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, uintptr_t)
-#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
+#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
+#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, uintptr_t)
+#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
-#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
-#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IO_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE)
+#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
+#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IO_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE)
-#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
+#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
-#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
-#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IO_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE)
+#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
+#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IO_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE)
enum hailo_non_linux_ioctl_code {
#define HAILO_CMD_MAP_BUFFER 0x0051
#define HAILO_CMD_FREE_MEMORY 0x0060
#define HAILO_CMD_ALLOC_MEMORY 0x0061
-#define HAILO_CMD_ABORT_ALL 0x0070
#define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
struct tCompatibleHailoIoctlParam
ULONG_PTR Value;
union {
hailo_memory_transfer_params MemoryTransfer;
- hailo_vdma_channel_enable_params ChannelEnable;
- hailo_vdma_channel_disable_params ChannelDisable;
- hailo_vdma_channel_wait_params ChannelWait;
- hailo_vdma_channel_abort_params ChannelAbort;
- hailo_vdma_channel_clear_abort_params ChannelClearAbort;
+ hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
+ hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
+ hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
+ hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
hailo_vdma_buffer_sync_params VdmaBufferSync;
hailo_fw_control FirmwareControl;
hailo_vdma_buffer_map_params VdmaBufferMap;
cmake_minimum_required(VERSION 3.0.0)
+if(WIN32)
+ set(HAILORT_SERVICE_OS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/windows")
+elseif(UNIX)
+ set(HAILORT_SERVICE_OS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/unix")
+else()
+    message(FATAL_ERROR "Unexpected platform target, stopping build")
+endif()
+
add_executable(hailort_service
hailort_rpc_service.cpp
- hailort_service.cpp
service_resource_manager.hpp
+ ${HAILORT_SERVICE_OS_DIR}/hailort_service.cpp
${HAILORT_COMMON_CPP_SOURCES}
)
target_compile_options(hailort_service PRIVATE ${HAILORT_COMPILE_OPTIONS})
libhailort
spdlog::spdlog
grpc++_unsecure
- hailort_rpc_grpc_proto)
+ hailort_rpc_grpc_proto
+)
+if(WIN32)
+ # Needed in order to compile eth utils (we compile here ${HAILORT_COMMON_CPP_SOURCES}, consider removing)
+ target_link_libraries(hailort_service Iphlpapi Shlwapi Kernel32 Advapi32)
+endif()
+
target_include_directories(hailort_service
PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}
${HAILORT_INC_DIR}
${HAILORT_COMMON_DIR}
${COMMON_INC_DIR}
set(SYSTEMD_UNIT_DIR ${CMAKE_INSTALL_PREFIX}/${SYSTEMD_UNIT_DIR})
endif()
-# Install service's environment variables file
-set(ENV_VARS_FILE_DIR ${CMAKE_INSTALL_SYSCONFDIR}/default/)
-set(HAILORT_SERVICE_ENV_VARS_FILE ${CMAKE_CURRENT_SOURCE_DIR}/hailort_service)
-install(
- FILES "${HAILORT_SERVICE_ENV_VARS_FILE}"
- DESTINATION "${ENV_VARS_FILE_DIR}"
- CONFIGURATIONS Release
- COMPONENT hailort_service
-)
+if (UNIX)
+ # Install service's environment variables file
+ set(ENV_VARS_FILE_DIR ${CMAKE_INSTALL_SYSCONFDIR}/default/)
+ set(HAILORT_SERVICE_ENV_VARS_FILE ${CMAKE_CURRENT_SOURCE_DIR}/hailort_service)
+ install(
+ FILES "${HAILORT_SERVICE_ENV_VARS_FILE}"
+ DESTINATION "${ENV_VARS_FILE_DIR}"
+ CONFIGURATIONS Release
+ COMPONENT hailort_service
+ )
-# Install systemd unit file
-set(HAILORT_SERVICE_UNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/hailort.service)
-install(
- FILES "${HAILORT_SERVICE_UNIT_FILE}"
- DESTINATION "${SYSTEMD_UNIT_DIR}"
- CONFIGURATIONS Release
- COMPONENT hailort_service
-)
+ # Install systemd unit file
+ set(HAILORT_SERVICE_UNIT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/hailort.service)
+ install(
+ FILES "${HAILORT_SERVICE_UNIT_FILE}"
+ DESTINATION "${SYSTEMD_UNIT_DIR}"
+ CONFIGURATIONS Release
+ COMPONENT hailort_service
+ )
+endif()
install(
TARGETS hailort_service
# Create empty directory for default PID file
install(DIRECTORY DESTINATION ${DAEMON_PID_DIR})
-endif()
\ No newline at end of file
+endif()
* @brief Implementation of the hailort rpc service
**/
-#include "hailort_rpc_service.hpp"
-#include "rpc/rpc_definitions.hpp"
-#include "service_resource_manager.hpp"
-#include "common/utils.hpp"
#include "hailo/network_group.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/vstream.hpp"
#include "hailo/hailort_common.hpp"
-#include <syslog.h>
+
+#include "common/utils.hpp"
+#include "common/os_utils.hpp"
+
+#include "hailort_rpc_service.hpp"
+#include "rpc/rpc_definitions.hpp"
+#include "service_resource_manager.hpp"
+
+#include <thread>
namespace hailort
{
-grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext *ctx, const keepalive_Request *request,
+HailoRtRpcService::HailoRtRpcService()
+ : ProtoHailoRtRpc::Service()
+{
+ m_keep_alive = make_unique_nothrow<std::thread>([this] () {
+ this->keep_alive();
+ });
+}
+
+void HailoRtRpcService::keep_alive()
+{
+ while (true) {
+ std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2);
+ auto now = std::chrono::high_resolution_clock::now();
+ std::unique_lock<std::mutex> lock(m_mutex);
+ std::set<uint32_t> pids_to_remove;
+ for (auto pid_to_last_alive : m_clients_pids) {
+ auto duration = std::chrono::duration_cast<std::chrono::seconds>(now - pid_to_last_alive.second);
+ if (duration > hailort::HAILO_KEEPALIVE_INTERVAL) {
+ auto client_id = pid_to_last_alive.first;
+ pids_to_remove.insert(client_id);
+ LOGGER__INFO("Client disconnected, pid: {}", client_id);
+ HAILORT_OS_LOG_INFO("Client disconnected, pid: {}", client_id);
+ ServiceResourceManager<OutputVStream>::get_instance().release_by_pid(client_id);
+ ServiceResourceManager<InputVStream>::get_instance().release_by_pid(client_id);
+ ServiceResourceManager<ConfiguredNetworkGroup>::get_instance().release_by_pid(client_id);
+ ServiceResourceManager<VDevice>::get_instance().release_by_pid(client_id);
+ }
+ }
+ for (auto &pid : pids_to_remove) {
+ m_clients_pids.erase(pid);
+ }
+ }
+}
+
+grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext*, const keepalive_Request *request,
empty*)
{
- auto client_id = request->process_id();
- while (!ctx->IsCancelled()) {
- sleep(hailort::HAILO_KEEPALIVE_INTERVAL_SEC);
- }
- LOGGER__INFO("Client disconnected, pid: {}", client_id);
- syslog(LOG_NOTICE, "Client disconnected, pid: %i", client_id);
- ServiceResourceManager<OutputVStream>::get_instance().release_by_pid(client_id);
- ServiceResourceManager<InputVStream>::get_instance().release_by_pid(client_id);
- ServiceResourceManager<ConfiguredNetworkGroup>::get_instance().release_by_pid(client_id);
- ServiceResourceManager<VDevice>::get_instance().release_by_pid(client_id);
+ auto client_id = request->pid();
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_clients_pids[client_id] = std::chrono::high_resolution_clock::now();
return grpc::Status::OK;
}
return grpc::Status::OK;
}
+grpc::Status HailoRtRpcService::VDevice_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
+ dup_handle_Reply* reply)
+{
+ auto &manager = ServiceResourceManager<VDevice>::get_instance();
+ auto handle = manager.dup_handle(request->pid(), request->handle());
+ reply->set_handle(handle);
+ return grpc::Status::OK;
+}
+
grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
VDevice_create_Reply *reply)
{
}
hailo_vdevice_params_t params = {
- .device_count = params_proto.device_count(),
- .device_ids = device_ids.data(),
- .scheduling_algorithm = static_cast<hailo_scheduling_algorithm_e>(params_proto.scheduling_algorithm()),
- .group_id = params_proto.group_id().c_str(),
- .multi_process_service = false
+ params_proto.device_count(),
+ device_ids.data(),
+ static_cast<hailo_scheduling_algorithm_e>(params_proto.scheduling_algorithm()),
+ params_proto.group_id().c_str(),
+ false
};
auto vdevice = VDevice::create(params);
NetworkGroupsParamsMap configure_params_map;
for (auto &name_configure_params_pair : request->configure_params_map()) {
- ConfigureNetworkParams network_configure_params;
+ ConfigureNetworkParams network_configure_params{};
auto proto_configure_params = name_configure_params_pair.params();
network_configure_params.batch_size = static_cast<uint16_t>(proto_configure_params.batch_size());
network_configure_params.power_mode = static_cast<hailo_power_mode_t>(proto_configure_params.power_mode());
for (auto &proto_name_streams_params_pair : proto_configure_params.stream_params_map()) {
auto proto_streams_params = proto_name_streams_params_pair.params();
auto stream_direction = static_cast<hailo_stream_direction_t>(proto_streams_params.direction());
- hailo_stream_parameters_t stream_params;
+ hailo_stream_parameters_t stream_params{};
+ stream_params.stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface());
+ stream_params.direction = stream_direction;
+ stream_params.flags = static_cast<hailo_stream_flags_t>(proto_streams_params.flags());
if (stream_direction == HAILO_H2D_STREAM) {
- stream_params = {
- .stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface()),
- .direction = stream_direction,
- {.pcie_input_params = {
- .reserved = 0
- }}
- };
+ stream_params.pcie_input_params = {0};
} else {
- stream_params = {
- .stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface()),
- .direction = stream_direction,
- {.pcie_output_params = {
- .reserved = 0
- }}
- };
+ stream_params.pcie_output_params = {0};
}
network_configure_params.stream_params_by_name.insert({proto_name_streams_params_pair.name(), stream_params});
}
for (auto &proto_name_network_params_pair : proto_configure_params.network_params_map()) {
auto proto_network_params = proto_name_network_params_pair.params();
hailo_network_parameters_t net_params {
- .batch_size = static_cast<uint16_t>(proto_network_params.batch_size())
+ static_cast<uint16_t>(proto_network_params.batch_size())
};
network_configure_params.network_params_by_name.insert({proto_name_network_params_pair.name(), net_params});
return grpc::Status::OK;
}
+grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
+ dup_handle_Reply* reply)
+{
+ auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+ auto handle = manager.dup_handle(request->pid(), request->handle());
+ reply->set_handle(handle);
+ return grpc::Status::OK;
+}
+
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_release(grpc::ServerContext*, const Release_Request *request,
Release_Reply *reply)
{
return grpc::Status::OK;
}
+grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_scheduled(grpc::ServerContext*,
+ const ConfiguredNetworkGroup_is_scheduled_Request *request,
+ ConfiguredNetworkGroup_is_scheduled_Reply *reply)
+{
+ auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng) {
+ return cng->is_scheduled();
+ };
+ auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+ auto is_scheduled = manager.execute<bool>(request->handle(), lambda);
+ reply->set_is_scheduled(static_cast<bool>(is_scheduled));
+ reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+ return grpc::Status::OK;
+}
+
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_timeout(grpc::ServerContext*,
const ConfiguredNetworkGroup_set_scheduler_timeout_Request *request,
ConfiguredNetworkGroup_set_scheduler_timeout_Reply *reply)
return grpc::Status::OK;
}
+grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_priority(grpc::ServerContext*,
+ const ConfiguredNetworkGroup_set_scheduler_priority_Request *request,
+ ConfiguredNetworkGroup_set_scheduler_priority_Reply *reply)
+{
+ auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, uint8_t priority, std::string network_name) {
+ return cng->set_scheduler_priority(priority, network_name);
+ };
+ auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+ auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<uint8_t>(request->priority()),
+ request->network_name());
+ reply->set_status(status);
+ return grpc::Status::OK;
+}
+
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_config_params(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_config_params_Request *request,
ConfiguredNetworkGroup_get_config_params_Reply *reply)
format.order = hailo_format_order_t(user_buffer_format_proto.order());
format.type = hailo_format_type_t(user_buffer_format_proto.type());
hailo_vstream_params_t params = {
- .user_buffer_format = format,
- .timeout_ms = vstream_params_proto.timeout_ms(),
- .queue_size = vstream_params_proto.queue_size(),
- .vstream_stats_flags = hailo_vstream_stats_flags_t(vstream_params_proto.vstream_stats_flags()),
- .pipeline_elements_stats_flags = hailo_pipeline_elem_stats_flags_t(vstream_params_proto.pipeline_elements_stats_flags())
+ format,
+ vstream_params_proto.timeout_ms(),
+ vstream_params_proto.queue_size(),
+ hailo_vstream_stats_flags_t(vstream_params_proto.vstream_stats_flags()),
+ hailo_pipeline_elem_stats_flags_t(vstream_params_proto.pipeline_elements_stats_flags())
};
inputs_params.emplace(param_proto.name(), std::move(params));
}
format.order = hailo_format_order_t(user_buffer_format_proto.order());
format.type = hailo_format_type_t(user_buffer_format_proto.type());
hailo_vstream_params_t params = {
- .user_buffer_format = format,
- .timeout_ms = vstream_params_proto.timeout_ms(),
- .queue_size = vstream_params_proto.queue_size(),
- .vstream_stats_flags = hailo_vstream_stats_flags_t(vstream_params_proto.vstream_stats_flags()),
- .pipeline_elements_stats_flags = hailo_pipeline_elem_stats_flags_t(vstream_params_proto.pipeline_elements_stats_flags())
+ format,
+ vstream_params_proto.timeout_ms(),
+ vstream_params_proto.queue_size(),
+ hailo_vstream_stats_flags_t(vstream_params_proto.vstream_stats_flags()),
+ hailo_pipeline_elem_stats_flags_t(vstream_params_proto.pipeline_elements_stats_flags())
};
output_params.emplace(param_proto.name(), std::move(params));
}
};
auto &manager = ServiceResourceManager<InputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView::create_const(data.data(), data.size()));
+
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("User aborted VStream write.");
+ reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
+ return grpc::Status::OK;
+ }
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "VStream write failed");
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
return grpc::Status::OK;
}
+grpc::Status HailoRtRpcService::InputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
+ dup_handle_Reply *reply)
+{
+ auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+ auto handle = manager.dup_handle(request->pid(), request->handle());
+ reply->set_handle(handle);
+ return grpc::Status::OK;
+}
+
+grpc::Status HailoRtRpcService::OutputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
+ dup_handle_Reply *reply)
+{
+ auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+ auto handle = manager.dup_handle(request->pid(), request->handle());
+ reply->set_handle(handle);
+ return grpc::Status::OK;
+}
+
grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_network_infos(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_network_infos_Request *request,
ConfiguredNetworkGroup_get_network_infos_Reply *reply)
};
auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView(data.data(), data.size()));
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("User aborted VStream read.");
+ reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
+ return grpc::Status::OK;
+ }
CHECK_SUCCESS_AS_RPC_STATUS(status, reply, "VStream read failed");
reply->set_data(data.data(), data.size());
reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
#pragma GCC diagnostic pop
#endif
+#include <thread>
+
namespace hailort
{
class HailoRtRpcService final : public ProtoHailoRtRpc::Service {
-
public:
+ HailoRtRpcService();
+
virtual grpc::Status client_keep_alive(grpc::ServerContext *ctx, const keepalive_Request *request,
empty*) override;
-
virtual grpc::Status get_service_version(grpc::ServerContext *, const get_service_version_Request *request,
get_service_version_Reply *reply) override;
+ virtual grpc::Status VDevice_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
+ dup_handle_Reply*) override;
virtual grpc::Status VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
VDevice_create_Reply *reply) override;
VStream_get_info_Reply *reply) override;
virtual grpc::Status OutputVStream_get_info(grpc::ServerContext*, const VStream_get_info_Request *request,
VStream_get_info_Reply *reply) override;
+ virtual grpc::Status InputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
+ dup_handle_Reply*) override;
+ virtual grpc::Status OutputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
+ dup_handle_Reply*) override;
+
+ virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
+ dup_handle_Reply*) override;
virtual grpc::Status ConfiguredNetworkGroup_release(grpc::ServerContext*, const Release_Request* request,
Release_Reply* reply) override;
virtual grpc::Status ConfiguredNetworkGroup_make_input_vstream_params(grpc::ServerContext*,
virtual grpc::Status ConfiguredNetworkGroup_get_all_vstream_infos(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_vstream_infos_Request *request,
ConfiguredNetworkGroup_get_vstream_infos_Reply *reply) override;
+ virtual grpc::Status ConfiguredNetworkGroup_is_scheduled(grpc::ServerContext*,
+ const ConfiguredNetworkGroup_is_scheduled_Request *request,
+ ConfiguredNetworkGroup_is_scheduled_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_set_scheduler_timeout(grpc::ServerContext*,
const ConfiguredNetworkGroup_set_scheduler_timeout_Request *request,
ConfiguredNetworkGroup_set_scheduler_timeout_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_set_scheduler_threshold(grpc::ServerContext*,
const ConfiguredNetworkGroup_set_scheduler_threshold_Request *request,
ConfiguredNetworkGroup_set_scheduler_threshold_Reply *reply) override;
+ virtual grpc::Status ConfiguredNetworkGroup_set_scheduler_priority(grpc::ServerContext*,
+ const ConfiguredNetworkGroup_set_scheduler_priority_Request *request,
+ ConfiguredNetworkGroup_set_scheduler_priority_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_get_output_vstream_infos(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_vstream_infos_Request *request,
ConfiguredNetworkGroup_get_vstream_infos_Reply *reply) override;
virtual grpc::Status ConfiguredNetworkGroup_get_config_params(grpc::ServerContext*,
const ConfiguredNetworkGroup_get_config_params_Request *request,
ConfiguredNetworkGroup_get_config_params_Reply *reply) override;
+
+private:
+ void keep_alive();
+
+ std::mutex m_mutex;
+ std::map<uint32_t, std::chrono::time_point<std::chrono::high_resolution_clock>> m_clients_pids;
+ std::unique_ptr<std::thread> m_keep_alive;
};
}
-# This file contains HailoRT's configurable environment variables for HailoRT Service.
+# This file contains HailoRT's configurable environment variables for HailoRT Linux Service.
# The environment variables are set to their default values.
# To change an environment variable's value, follow the steps:
# 1. Change the value of the selected environemt variable in this file
[Service]
HAILORT_LOGGER_PATH="/var/log/hailo"
HAILO_DISABLE_MULTIPLEXER=0
-HAILO_ENABLE_MULTI_DEVICE_SCHEDULER=0
-SCHEDULER_MONITOR=0
+HAILO_MONITOR=0
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- *
- * @file hailort_service.cpp
- * @brief main for hailort service
- * To run as without daemonize the executable:
- * 1) Compile with `./build.sh`
- * 2) Run `./bin/linux.x86_64.debug/hailort_service standalone`
- *
- * To run as daemon service please follow the steps:
- * 1) Install the HailoRT:
- * cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_SERVICE=1 && sudo cmake --build build --target install
- *
- * 2) Reload systemd manager configuration:
- * sudo systemctl daemon-reload
- *
- * 3) Enable and start the service
- * sudo systemctl enable --now hailort.service
- *
- * 4) Stop service
- * sudo systemctl stop hailort.service
-*/
-
-#include "hailort_rpc_service.hpp"
-#include "rpc/rpc_definitions.hpp"
-#include "common/utils.hpp"
-#include "common/filesystem.hpp"
-#include "hailo/hailort_common.hpp"
-
-#include <syslog.h>
-#include <sys/stat.h>
-
-void RunService() {
- std::string server_address(hailort::HAILO_DEFAULT_UDS_ADDR);
- hailort::HailoRtRpcService service;
-
- grpc::ServerBuilder builder;
- builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
- builder.SetMaxReceiveMessageSize(-1);
- builder.RegisterService(&service);
- std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
- chmod(hailort::HAILO_DEFAULT_SERVICE_ADDR.c_str(), S_IROTH | S_IWOTH | S_IRUSR | S_IWUSR);
- server->Wait();
-}
-
-void write_pid_to_lock_file()
-{
- auto status = hailort::Filesystem::create_directory(HAILO_DAEMON_PID_DIR);
- if (status != HAILO_SUCCESS) {
- syslog(LOG_ERR, "Cannot create directory at path, status=%i", status);
- return;
- }
-
- auto locked_file = hailort::LockedFile::create(HAILO_DAEMON_PID_FILE, "wx");
- if (HAILO_SUCCESS != locked_file.status()) {
- syslog(LOG_ERR, "Failed to lock pid file for hailort service, status=%i", locked_file.status());
- return;
- }
-
- std::string pid = std::to_string(getpid());
- auto ret = write(locked_file->get_fd(), pid.c_str(), pid.size());
- if (-1 == ret) {
- syslog(LOG_ERR, "Failed to write pid to lock file for hailort service, errno=%i", errno);
- return;
- }
-}
-
-int main(int argc, char *argv[])
-{
- bool is_standalone = ((1 < argc) && (strcmp("standalone", argv[1]) == 0));
- if (!is_standalone) {
- int ret = daemon(0, 0);
- if (ret < 0) {
- syslog(LOG_ERR, "Failed to create daemon with errno %i", errno);
- exit(EXIT_FAILURE);
- }
-
- write_pid_to_lock_file();
- }
- RunService();
- return 0;
-}
\ No newline at end of file
Resource(uint32_t pid, std::shared_ptr<T> resource)
: pid(pid), resource(std::move(resource))
{}
- std::shared_timed_mutex resource_mutex;
+
uint32_t pid;
std::shared_ptr<T> resource;
-
};
template<class T>
}
template<class K, class Func, typename... Args>
- K execute(uint32_t key, Func &lambda, Args... args)
+ K execute(uint32_t handle, Func &lambda, Args... args)
{
std::unique_lock<std::mutex> lock(m_mutex);
- auto resource_expected = resource_lookup(key);
+ auto resource_expected = resource_lookup(handle);
assert(resource_expected);
-
auto resource = resource_expected.release();
- std::shared_lock<std::shared_timed_mutex> resource_lock(resource->resource_mutex);
+
+ assert(contains(m_resources_mutexes, handle));
+ std::shared_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
lock.unlock();
K ret = lambda(resource->resource, args...);
return ret;
}
- uint32_t register_resource(uint32_t pid, std::shared_ptr<T> const &resource)
+ uint32_t register_resource(uint32_t pid, const std::shared_ptr<T> &resource)
{
std::unique_lock<std::mutex> lock(m_mutex);
-
+ auto index = m_current_handle_index.load();
// Create a new resource and register
- auto index = m_current_handle_index;
- m_resources.emplace(m_current_handle_index++, std::make_shared<Resource<T>>(pid, std::move(resource)));
+ m_resources.emplace(m_current_handle_index, std::make_shared<Resource<T>>(pid, std::move(resource)));
+ m_resources_mutexes[m_current_handle_index]; // construct std::shared_timed_mutex
+ m_current_handle_index++;
return index;
}
- hailo_status release_resource(uint32_t key)
+ uint32_t dup_handle(uint32_t pid, uint32_t handle)
+ {
+ // Keeping this function for future possible usage
+ (void)pid;
+ return handle;
+ }
+
+ hailo_status release_resource(uint32_t handle)
{
std::unique_lock<std::mutex> lock(m_mutex);
- auto found = m_resources.find(key);
- CHECK(found != m_resources.end(), HAILO_NOT_FOUND, "Failed to release resource with key {}, resource does not exist", key);
- std::unique_lock<std::shared_timed_mutex> resource_lock(found->second->resource_mutex);
- m_resources.erase(key);
+ auto found = m_resources.find(handle);
+ CHECK(found != m_resources.end(), HAILO_NOT_FOUND, "Failed to release resource with handle {}, resource does not exist", handle);
+ assert(contains(m_resources_mutexes, handle));
+ auto resource = m_resources[handle];
+ {
+ std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
+ m_resources.erase(handle);
+ }
+ m_resources_mutexes.erase(handle);
return HAILO_SUCCESS;
}
{
std::unique_lock<std::mutex> lock(m_mutex);
for (auto iter = m_resources.begin(); iter != m_resources.end(); ) {
+ auto handle = iter->first;
if (iter->second->pid == pid) {
- std::unique_lock<std::shared_timed_mutex> resource_lock(iter->second->resource_mutex);
- iter = m_resources.erase(iter);
+ assert(contains(m_resources_mutexes, handle));
+ {
+ std::unique_lock<std::shared_timed_mutex> resource_lock(m_resources_mutexes[handle]);
+ iter = m_resources.erase(iter);
+ }
+ m_resources_mutexes.erase(handle);
} else {
++iter;
}
: m_current_handle_index(0)
{}
- Expected<std::shared_ptr<Resource<T>>> resource_lookup(uint32_t key)
+ Expected<std::shared_ptr<Resource<T>>> resource_lookup(uint32_t handle)
{
- auto found = m_resources.find(key);
- CHECK_AS_EXPECTED(found != m_resources.end(), HAILO_NOT_FOUND, "Failed to find resource with key {}", key);
-
+ auto found = m_resources.find(handle);
+ CHECK_AS_EXPECTED(found != m_resources.end(), HAILO_NOT_FOUND, "Failed to find resource with handle {}", handle);
auto resource = found->second;
return resource;
}
std::mutex m_mutex;
- uint32_t m_current_handle_index;
+ std::atomic<uint32_t> m_current_handle_index;
std::unordered_map<uint32_t, std::shared_ptr<Resource<T>>> m_resources;
+ std::unordered_map<uint32_t, std::shared_timed_mutex> m_resources_mutexes;
};
}
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ *
+ * @file hailort_service.cpp
+ * @brief main for hailort service
+ * To run without daemonization run the hailort_service executable with `standalone`.
+ *
+ * To run as daemon service please follow the steps:
+ * 1) Install the HailoRT:
+ * cmake -H. -Bbuild -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_SERVICE=1 && sudo cmake --build build --target install
+ *
+ * 2) Reload systemd manager configuration:
+ * sudo systemctl daemon-reload
+ *
+ * 3) Enable and start the service
+ * sudo systemctl enable --now hailort.service
+ *
+ * 4) Stop service
+ * sudo systemctl stop hailort.service
+*/
+
+#include "hailort_rpc_service.hpp"
+#include "rpc/rpc_definitions.hpp"
+#include "common/utils.hpp"
+#include "common/filesystem.hpp"
+#include "hailo/hailort_common.hpp"
+#include "common/os_utils.hpp"
+
+#include <syslog.h>
+#include <sys/stat.h>
+
+// Builds and runs the gRPC server that exposes the HailoRT service.
+// Blocks until the server shuts down.
+void RunService() {
+ const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR;
+ hailort::HailoRtRpcService service;
+
+ grpc::ServerBuilder builder;
+ builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
+ builder.SetMaxReceiveMessageSize(-1); // -1 == unlimited receive message size
+ builder.RegisterService(&service);
+ std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
+ if (nullptr == server) {
+     // BuildAndStart() returns nullptr on failure (e.g. address already in use);
+     // without this check the Wait() call below dereferences a null pointer.
+     HAILORT_OS_LOG_ERROR("Failed to start hailort service server on {}", server_address);
+     return;
+ }
+ // Allow non-root clients to connect to the service socket (owner + others rw).
+ chmod(hailort::HAILO_DEFAULT_SERVICE_ADDR.c_str(), S_IROTH | S_IWOTH | S_IRUSR | S_IWUSR);
+ server->Wait();
+}
+
+// Writes the daemon's PID into a lock file under HAILO_DAEMON_PID_DIR.
+// Best-effort: on any failure an error is logged and the daemon keeps
+// running without a pid file.
+void write_pid_to_lock_file()
+{
+ auto status = hailort::Filesystem::create_directory(HAILO_DAEMON_PID_DIR);
+ if (status != HAILO_SUCCESS) {
+ HAILORT_OS_LOG_ERROR("Cannot create directory at path, status={}", status);
+ return;
+ }
+
+ // "wx" => create exclusively; fails if the file already exists (i.e. another
+ // daemon instance already holds the pid file).
+ auto locked_file = hailort::LockedFile::create(HAILO_DAEMON_PID_FILE, "wx");
+ if (HAILO_SUCCESS != locked_file.status()) {
+ HAILORT_OS_LOG_ERROR("Failed to lock pid file for hailort service, status={}", locked_file.status());
+ return;
+ }
+
+ std::string pid = std::to_string(getpid());
+ auto ret = write(locked_file->get_fd(), pid.c_str(), pid.size());
+ if (-1 == ret) {
+ HAILORT_OS_LOG_ERROR("Failed to write pid to lock file for hailort service, errno={}", errno);
+ return;
+ }
+}
+
+// Entry point. With "standalone" as the first argument the service runs in
+// the foreground; otherwise the process daemonizes and records its pid.
+int main(int argc, char *argv[])
+{
+ bool is_standalone = ((1 < argc) && (strcmp("standalone", argv[1]) == 0));
+ if (!is_standalone) {
+ // daemon(0, 0): chdir to "/" and redirect stdio to /dev/null
+ int ret = daemon(0, 0);
+ if (ret < 0) {
+ HAILORT_OS_LOG_ERROR("Failed to create daemon with errno {}", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ write_pid_to_lock_file();
+ }
+ RunService(); // blocks until server shutdown
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ *
+ * @file hailort_service.cpp
+ * @brief main for hailort service
+ * The service code is based on Microsoft's documentation: https://learn.microsoft.com/en-us/windows/win32/services/the-complete-service-sample
+ *
+ * Running hailort_service:
+ * To run hailort_service without Windows service control manager (SCM), run hailort_service executable with `standalone`.
+ *
+ * To run as a service application please follow the steps:
+ * 1) Compile and install libhailort:
+ * `cmake -H. -Bbuild -A=x64 -DCMAKE_BUILD_TYPE=Release -DHAILO_BUILD_SERVICE=1 && cmake --build build --config release --target install`
+ *
+ * 2) To install the service, run the `hailort_service` executable with `install`:
+ * `"C:\Program Files\HailoRT\bin\hailort_service.exe" install`
+
+ * 3) Start the service:
+ * `sc start hailort_service`
+ *
+ * 4) Stop the service:
+ * `sc stop hailort_service`
+ *
+ * 5) Delete service:
+ * `sc delete hailort_service`
+*/
+
+#include "hailort_rpc_service.hpp"
+#include "rpc/rpc_definitions.hpp"
+#include "common/os_utils.hpp"
+
+#include <winsvc.h>
+#include <windows.h>
+#include <tchar.h>
+#include <strsafe.h>
+
+#define SERVICE_NAME ("hailort_service")
+static const DWORD HRT_SERVICE_INIT_WAIT_TIME_MS(3000);
+static const DWORD HRT_SERVICE_ZERO_WAIT_TIME_MS(0);
+
+SERVICE_STATUS g_service_status = {0};
+SERVICE_STATUS_HANDLE g_service_status_handle = nullptr;
+HANDLE g_stop_event_handle = INVALID_HANDLE_VALUE;
+std::unique_ptr<grpc::Server> g_hailort_rpc_server = nullptr;
+
+// Builds and starts the gRPC server exposing the HailoRT service and blocks
+// until it is shut down (see terminate_server_thread()).
+void RunService()
+{
+ const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR;
+ hailort::HailoRtRpcService service;
+
+ grpc::ServerBuilder builder;
+ builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
+ builder.SetMaxReceiveMessageSize(-1); // -1 == unlimited receive message size
+ builder.RegisterService(&service);
+ g_hailort_rpc_server = builder.BuildAndStart();
+ if (nullptr == g_hailort_rpc_server) {
+ // BuildAndStart() returns nullptr on failure (e.g. address already in use);
+ // without this check the Wait() call below dereferences a null pointer.
+ HAILORT_OS_LOG_ERROR("Failed to start hailort service server on {}", server_address);
+ return;
+ }
+ g_hailort_rpc_server->Wait();
+}
+
+// Installs the service in the SCM database.
+// Best-effort: on any failure an error is logged and the function returns
+// without installing; all opened SCM handles are closed on every path.
+void install_service()
+{
+ SC_HANDLE open_sc_manager_handle = nullptr;
+ SC_HANDLE create_service_handle = nullptr;
+ TCHAR module_path[MAX_PATH];
+
+ // Resolve the full path of this executable - it becomes the service binary.
+ if (!GetModuleFileName(nullptr, module_path, MAX_PATH)) {
+ HAILORT_OS_LOG_ERROR("GetModuleFileName() failed. Cannot install hailort service, LE = {}", GetLastError());
+ return;
+ }
+
+ // Quote the path so paths containing spaces are handled correctly by the SCM.
+ TCHAR quoted_module_path[MAX_PATH];
+ StringCbPrintf(quoted_module_path, MAX_PATH, ("\"%s\""), module_path);
+
+ // Get a handle to the SCM database.
+ open_sc_manager_handle = OpenSCManager(
+ nullptr, // local computer
+ nullptr, // ServicesActive database
+ SC_MANAGER_ALL_ACCESS); // full access rights
+
+ if (nullptr == open_sc_manager_handle) {
+ HAILORT_OS_LOG_ERROR("OpenSCManager() failed. Cannot install hailort service, LE = {}", GetLastError());
+ return;
+ }
+
+ // Create the service
+ create_service_handle = CreateService(
+ open_sc_manager_handle, // SCM database
+ SERVICE_NAME, // name of service
+ SERVICE_NAME, // service name to display
+ SERVICE_ALL_ACCESS, // desired access
+ SERVICE_WIN32_OWN_PROCESS, // service type
+ SERVICE_DEMAND_START, // start type
+ SERVICE_ERROR_NORMAL, // error control type
+ quoted_module_path, // path to service's binary
+ nullptr, // no load ordering group
+ nullptr, // no tag identifier
+ nullptr, // no dependencies
+ nullptr, // LocalSystem account
+ nullptr); // no password
+
+ if (nullptr == create_service_handle) {
+ HAILORT_OS_LOG_ERROR("CreateService() failed. Cannot install hailort service, LE = {}", GetLastError())
;
+ CloseServiceHandle(open_sc_manager_handle);
+ return;
+ }
+
+ CloseServiceHandle(create_service_handle);
+ CloseServiceHandle(open_sc_manager_handle);
+}
+
+// Sets the current service status and reports it to the SCM.
+// current_state   - SERVICE_* state to report (e.g. SERVICE_RUNNING).
+// win32_exit_code - exit code reported to the SCM (NO_ERROR when healthy).
+// wait_hint       - estimated time (ms) for a pending operation to complete.
+void report_service_status(DWORD current_state, DWORD win32_exit_code, DWORD wait_hint)
+{
+ // Incremented on every pending-state report so the SCM can see progress.
+ static DWORD check_point = 1;
+ g_service_status.dwCurrentState = current_state;
+ g_service_status.dwWin32ExitCode = win32_exit_code;
+ g_service_status.dwWaitHint = wait_hint;
+
+ if (SERVICE_START_PENDING == current_state) {
+ // Service is about to start - don't accept controls until it is running.
+ g_service_status.dwControlsAccepted = 0;
+ } else {
+ g_service_status.dwControlsAccepted = SERVICE_ACCEPT_STOP;
+ }
+
+ // Stable states reset the checkpoint; pending states advance it.
+ if ((SERVICE_RUNNING == current_state) || (SERVICE_STOPPED == current_state)) {
+ g_service_status.dwCheckPoint = 0;
+ } else {
+ g_service_status.dwCheckPoint = check_point++;
+ }
+
+ // Report the service status to the SCM.
+ SetServiceStatus(g_service_status_handle, &g_service_status);
+}
+
+// Called by SCM whenever a control code is sent to the service.
+// Only SERVICE_CONTROL_STOP is handled; all other codes are ignored.
+void control_handler(DWORD control_code)
+{
+ switch(control_code) {
+ case SERVICE_CONTROL_STOP:
+ report_service_status(SERVICE_STOP_PENDING, NO_ERROR, HRT_SERVICE_ZERO_WAIT_TIME_MS);
+
+ // Signal the service to stop.
+ SetEvent(g_stop_event_handle);
+ // NOTE(review): SERVICE_STOPPED is reported here, before service_init()
+ // finishes shutting down the server thread - the SCM may consider the
+ // service stopped while cleanup is still in progress; confirm intended.
+ report_service_status(SERVICE_STOPPED, NO_ERROR, HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ return;
+
+ default:
+ break;
+ }
+}
+
+// Shuts down the gRPC server and joins the thread running RunService().
+// thread_handle - handle returned by CreateThread (may be null if creation failed).
+// Safe to call when the server or the thread never started.
+void terminate_server_thread(HANDLE thread_handle)
+{
+ if (nullptr != g_hailort_rpc_server) {
+ // Unblocks the RunService() thread waiting in Wait().
+ g_hailort_rpc_server->Shutdown();
+ }
+ if (nullptr == thread_handle) {
+ // CreateThread failed earlier - nothing to wait on. Previously this
+ // waited on (and dereferenced) the null handle.
+ return;
+ }
+ auto rpc_server_wait_res = WaitForSingleObject(thread_handle, INFINITE);
+ if (WAIT_OBJECT_0 == rpc_server_wait_res) {
+ CloseHandle(thread_handle);
+ } else {
+ HAILORT_OS_LOG_ERROR("Failed waiting on hailort server thread, LE = {}", GetLastError());
+ report_service_status(SERVICE_STOPPED, GetLastError(), HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ }
+}
+
+// The service code
+
+// Thread entry adapter: CreateThread requires a `DWORD WINAPI (LPVOID)`
+// routine; casting RunService (a plain void() function) to
+// LPTHREAD_START_ROUTINE, as done previously, is undefined behavior
+// (wrong signature and calling convention), so wrap it instead.
+static DWORD WINAPI service_thread_proc(LPVOID /*param*/)
+{
+ RunService();
+ return 0;
+}
+
+// Creates the stop event, starts the server thread and waits for the stop
+// signal from the SCM control handler. Reports final status to the SCM.
+void service_init()
+{
+ // Create an event. The control handler function signals this event when it receives the stop control code.
+ g_stop_event_handle = CreateEvent(
+ nullptr, // default security attributes
+ TRUE, // manual reset event
+ FALSE, // not signaled
+ nullptr); // no name
+ if (nullptr == g_stop_event_handle) {
+ report_service_status(SERVICE_STOPPED, GetLastError(), HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ return;
+ }
+
+ // Report SCM the running status when initialization is complete.
+ report_service_status(SERVICE_RUNNING, NO_ERROR, HRT_SERVICE_ZERO_WAIT_TIME_MS);
+
+ // Start a thread that will perform the main task of the service
+ HANDLE service_thread_handle = CreateThread(nullptr, 0, service_thread_proc, nullptr, 0, nullptr);
+ if (nullptr == service_thread_handle) {
+ // Previously execution continued here and later waited on / terminated a
+ // null thread handle; bail out and report the failure instead.
+ HAILORT_OS_LOG_ERROR("Failed to create hailort_service thread, LE = {}", GetLastError());
+ report_service_status(SERVICE_STOPPED, GetLastError(), HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ CloseHandle(g_stop_event_handle);
+ return;
+ }
+
+ // Wait for stop service signal
+ auto service_wait_res = WaitForSingleObject(g_stop_event_handle, INFINITE);
+ if (WAIT_OBJECT_0 == service_wait_res) {
+ terminate_server_thread(service_thread_handle);
+ report_service_status(SERVICE_STOPPED, NO_ERROR, HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ } else {
+ HAILORT_OS_LOG_ERROR("Failed waiting for signal on hailort_service stop event, LE = {}", GetLastError());
+ report_service_status(SERVICE_STOPPED, GetLastError(), HRT_SERVICE_ZERO_WAIT_TIME_MS);
+ }
+ CloseHandle(g_stop_event_handle);
+}
+
+// Entry point for the service (invoked by the SCM via the dispatch table).
+// Registers the control handler, initializes the reported status structure
+// and hands off to service_init().
+void service_main(DWORD /* dwArgc */, LPTSTR * /*lpszArgv*/)
+{
+ // Register the handler function for the service
+ g_service_status_handle = RegisterServiceCtrlHandler(SERVICE_NAME, control_handler);
+ if (!g_service_status_handle) {
+ HAILORT_OS_LOG_ERROR("RegisterServiceCtrlHandler() failed. Cannot start hailort service, LE = {}", GetLastError());
+ return;
+ }
+
+ g_service_status.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ g_service_status.dwServiceSpecificExitCode = 0;
+
+ // Report initial status to the SCM - service is starting
+ report_service_status(SERVICE_START_PENDING, NO_ERROR, HRT_SERVICE_INIT_WAIT_TIME_MS);
+ service_init();
+}
+
+// Process entry point: dispatches between standalone mode ("standalone"),
+// service installation ("install") and normal SCM-driven service startup.
+int main(int argc, TCHAR *argv[])
+{
+ const bool is_standalone = ((1 < argc) && (0 == lstrcmpi(argv[1], "standalone")));
+ if (is_standalone) {
+ RunService();
+ return 0;
+ }
+
+ // If command-line parameter is "install", install the service.
+ // Otherwise, the service is probably being started by the SCM.
+ // Note: the guard must be (1 < argc) - argc is always >= 1, so the previous
+ // (0 < argc) check read argv[1] even when no argument was passed.
+ if ((1 < argc) && (0 == lstrcmpi(argv[1], "install"))) {
+ install_service();
+ return 0;
+ }
+
+ // Service is being started by the SCM
+ SERVICE_TABLE_ENTRY dispatch_table[] = {
+ {SERVICE_NAME, static_cast<LPSERVICE_MAIN_FUNCTION>(service_main)},
+ {nullptr, nullptr}
+ };
+
+ // This call returns when the service has stopped (SERVICE_STOPPED).
+ // The process should simply terminate when the call returns.
+ if (!StartServiceCtrlDispatcher(dispatch_table)) {
+ HAILORT_OS_LOG_ERROR("StartServiceCtrlDispatcher() failed. Cannot start hailort service, LE = {}", GetLastError());
+ }
+ return 0;
+}
\ No newline at end of file
--- /dev/null
+@REM This file contains HailoRT's configurable environment variables for HailoRT Windows Service.
+@REM The environment variables are set to their default values, and are separated by the character \0.
+@REM To change an environment variable's value, follow the steps:
+@REM 1. Change the value of the selected environment variable in this file
+@REM 2. Run this script
+@REM 3. Restart the service
+@REM Running this script requires Administrator permissions.
+
+reg ADD HKLM\SYSTEM\CurrentControlSet\Services\hailort_service /f /v Environment /t REG_MULTI_SZ /d ^
+HAILORT_LOGGER_PATH="%PROGRAMDATA%\HailoRT_Service\logs"\0^
+HAILO_DISABLE_MULTIPLEXER=0\0
\ No newline at end of file
fw_config_serializer.cpp
common.cpp
benchmark_command.cpp
- temp_measurement.cpp
parse_hef_command.cpp
graph_printer.cpp
mon_command.cpp
run2/live_printer.cpp
run2/timer_live_track.cpp
run2/network_live_track.cpp
+ run2/measurement_live_track.cpp
)
if(UNIX)
${HAILORT_COMMON_CPP_SOURCES}
${PROJECT_SOURCE_DIR}/common/src/firmware_header_utils.c
${PROJECT_SOURCE_DIR}/common/src/md5.c
- ${HAILORT_SRC_DIR}/pipeline.cpp
- ${HAILO_FULL_OS_DIR}/event.cpp
+ ${HAILORT_SRC_DIR}/net_flow/pipeline/pipeline.cpp # TODO: link dynamically with libhailort
+ ${HAILO_FULL_OS_DIR}/event.cpp # TODO: link dynamically with libhailort
)
+
target_compile_options(hailortcli PRIVATE ${HAILORT_COMPILE_OPTIONS})
set_property(TARGET hailortcli PROPERTY CXX_STANDARD 14)
set_property(TARGET hailortcli PROPERTY INSTALL_RPATH "$ORIGIN" "../lib/") # Link with a relative libhailort
void CliCommon::reset_cursor(size_t lines_count)
{
for (size_t i = 0; i < lines_count; i++) {
- std::cout << FORMAT_CURSOR_UP_LINE;
+ std::cout << FORMAT_CURSOR_UP_LINE; // Override prev line
+ std::cout << FORMAT_CLEAR_LINE; // Delete line
}
}
-void CliCommon::clear_lines_down(size_t lines_count)
+void CliCommon::clear_terminal()
{
- for (size_t i = 0; i < lines_count; i++) {
- std::cout << FORMAT_CURSOR_DOWN_CLEAR_LINE;
- }
+ std::cout << FORMAT_CLEAR_TERMINAL_CURSOR_FIRST_LINE;
}
bool CliCommon::is_positive_number(const std::string &s)
bool is_number = (!s.empty()) && (std::all_of(s.begin(), s.end(), ::isdigit));
return is_number && (0 <= std::stoi(s));
}
+
+AlternativeTerminal::AlternativeTerminal()
+{
+ std::cout << FORMAT_ENTER_ALTERNATIVE_SCREEN;
+ CliCommon::clear_terminal();
+}
+
+AlternativeTerminal::~AlternativeTerminal()
+{
+ std::cout << FORMAT_EXIT_ALTERNATIVE_SCREEN;
+}
\ No newline at end of file
// http://www.climagic.org/mirrors/VT100_Escape_Codes.html
#define FORMAT_CLEAR_LINE "\033[2K\r"
#define FORMAT_CURSOR_UP_LINE "\033[F"
-#define FORMAT_CURSOR_DOWN_CLEAR_LINE "\033[B\33[2K\r"
+#define FORMAT_CLEAR_TERMINAL_CURSOR_FIRST_LINE "\033[2J\033[1;1H"
+#define FORMAT_ENTER_ALTERNATIVE_SCREEN "\033[?1049h"
+#define FORMAT_EXIT_ALTERNATIVE_SCREEN "\033[?1049l"
+#define FORMAT_GREEN_PRINT "\x1B[1;32m"
+#define FORMAT_NORMAL_PRINT "\x1B[0m"
class CliCommon final
{
static std::string duration_to_string(std::chrono::seconds secs);
static Expected<std::string> current_time_to_string();
static void reset_cursor(size_t number_of_lines);
- static void clear_lines_down(size_t number_of_lines);
static bool is_positive_number(const std::string &s);
static bool is_non_negative_number(const std::string &s);
+ static void clear_terminal();
};
// Validators
}
};
+// This class is an RAII for running in alternative terminal
+class AlternativeTerminal final
+{
+public:
+ AlternativeTerminal();
+ ~AlternativeTerminal();
+};
+
// Based on NLOHMANN_JSON_SERIALIZE_ENUM (json/include/nlohmann/json.hpp)
// Accepts a static array instead of building one in the function
#define NLOHMANN_JSON_SERIALIZE_ENUM2(ENUM_TYPE, _pair_arr)\
// TODO - HRT-7364 - add CPU subsystem frequency into the device extended info control
// and use it for get the timer's frequency
#define NN_CORE_TO_TIMER_FREQ_FACTOR (2)
-#define MERCURY_VPU_CORE_CPU_DEFAULT_FREQ_MHZ (200)
+#define HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ (200)
constexpr int DownloadActionListCommand::INVALID_NUMERIC_VALUE;
auto chip_arch = device.get_architecture();
CHECK_EXPECTED_AS_STATUS(chip_arch);
unsigned int clock_cycle = 0;
- // TODO - HRT-8046 Implement extended device info for mercury
- if (HAILO_ARCH_MERCURY_VPU == chip_arch.value()) {
- clock_cycle = MERCURY_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
+ // TODO - HRT-8046 Implement extended device info for hailo15
+ if (HAILO_ARCH_HAILO15 == chip_arch.value()) {
+ clock_cycle = HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
} else {
auto extended_info = device.get_extended_device_information();
CHECK_EXPECTED_AS_STATUS(extended_info);
return "HAILO8";\r
case HAILO_ARCH_HAILO8L:\r
return "HAILO8L";\r
- case HAILO_ARCH_MERCURY_CA:\r
- return "MERCURY_CA";\r
- case HAILO_ARCH_MERCURY_VPU:\r
- return "MERCURY_VPU";\r
+ case HAILO_ARCH_HAILO15:\r
+ return "HAILO15";\r
default:\r
return "Unknown";\r
}\r
return HAILO_INVALID_OPERATION;
}
- if (Device::Type::CORE != device.get_type()) {
+ if (Device::Type::INTEGRATED != device.get_type()) {
status = write_logs_to_file(device, ofs, HAILO_CPU_ID_0);
if (status != HAILO_SUCCESS){
return status;
#include "hailo/hailort.h"
#include "hailo/vstream.hpp"
-#include "pipeline.hpp"
+
+#include "net_flow/pipeline/pipeline.hpp"
#include "DotWriter.h"
#include <memory>
#include <type_traits>
+
namespace hailort
{
} \
} while (0)
+// Used for run and run2 commands
+constexpr size_t OVERALL_LATENCY_TIMESTAMPS_LIST_LENGTH (512);
+
struct hailo_device_params {
std::vector<std::string> device_ids;
};
#include "infer_stats_printer.hpp"
#include "run_command.hpp"
#include "common.hpp"
-#include "pipeline.hpp"
+
+#include "net_flow/pipeline/pipeline.hpp"
#include <fstream>
#include <iostream>
for (const auto &pair : inference_result->m_temp_measurements) {
if (nullptr != pair.second) {
m_results_csv_file << ",";
- m_results_csv_file << pair.second->min_value;
+ if (auto min = pair.second->min()) {
+ m_results_csv_file << *min;
+ }
m_results_csv_file << ",";
- m_results_csv_file << pair.second->average_value;
+ if (auto mean = pair.second->mean()) {
+ m_results_csv_file << *mean;
+ }
m_results_csv_file << ",";
- m_results_csv_file << pair.second->max_value;
+ if (auto max = pair.second->max()) {
+ m_results_csv_file << *max;
+ }
} else {
m_results_csv_file << ",,,";
}
}
auto temp_measure_iter = inference_result->m_temp_measurements.find(pair.first);
if ((temp_measure_iter != inference_result->m_temp_measurements.end()) && (nullptr != temp_measure_iter->second)) {
- measurement_stream << " Minimum chip temperature: " << temp_measure_iter->second->min_value << "C" << std::endl;
- measurement_stream << " Average chip temperature: " << temp_measure_iter->second->average_value << "C" << std::endl;
- measurement_stream << " Maximum chip temperature: " << temp_measure_iter->second->max_value << "C" << std::endl;
+ if (auto min = temp_measure_iter->second->min()) {
+ measurement_stream << " Minimum chip temperature: " << *min << "C" << std::endl;
+ }
+ if (auto mean = temp_measure_iter->second->mean()) {
+ measurement_stream << " Average chip temperature: " << *mean << "C" << std::endl;
+ }
+ if (auto max = temp_measure_iter->second->max()) {
+ measurement_stream << " Maximum chip temperature: " << *max << "C" << std::endl;
+ }
}
if (0 != measurement_stream.rdbuf()->in_avail()) {
std::cout << " Device: " << pair.first << std::endl;
#include "inference_progress.hpp"
#include "infer_stats_printer.hpp"
#include "common.hpp"
+#include "common/os_utils.hpp"
#include <iostream>
#include <iomanip>
void InferProgress::start()
{
m_print_thread = std::thread([this] () {
+ OsUtils::set_current_thread_name("PROGRESS_BAR");
while (true) {
print_progress(true);
auto status = m_stop_event->wait(m_print_interval);
#define _HAILO_INFER_RESULT_
#include "power_measurement_command.hpp"
-#include "temp_measurement.hpp"
+#include "common/device_measurements.hpp"
#include "hailo/runtime_statistics.hpp"
#include "hailo/vstream.hpp"
for (const auto &device : devices) {
m_power_measurements.emplace(device.get().get_dev_id(), std::shared_ptr<LongPowerMeasurement>{});
m_current_measurements.emplace(device.get().get_dev_id(), std::shared_ptr<LongPowerMeasurement>{});
- m_temp_measurements.emplace(device.get().get_dev_id(), std::shared_ptr<TempMeasurementData>{});
+ m_temp_measurements.emplace(device.get().get_dev_id(), std::shared_ptr<AccumulatorResults>{});
}
}
return HAILO_SUCCESS;
}
- hailo_status set_temp_measurement(const std::string &device_id, std::shared_ptr<TempMeasurementData> &&temp_measure)
+ hailo_status set_temp_measurement(const std::string &device_id, std::shared_ptr<AccumulatorResults> &&temp_measure)
{
auto iter = m_temp_measurements.find(device_id);
CHECK(m_temp_measurements.end() != iter, HAILO_INVALID_ARGUMENT);
// TODO: create a struct containing all device measurements, and keep only one map
std::map<std::string, std::shared_ptr<LongPowerMeasurement>> m_power_measurements;
std::map<std::string, std::shared_ptr<LongPowerMeasurement>> m_current_measurements;
- std::map<std::string, std::shared_ptr<TempMeasurementData>> m_temp_measurements;
+ std::map<std::string, std::shared_ptr<AccumulatorResults>> m_temp_measurements;
private:
std::vector<NetworkGroupInferResult> m_network_group_results;
* @brief Monitor of networks - Presents information about the running networks
**/
-#include "mon_command.hpp"
-#include "common.hpp"
#include "hailo/hailort.h"
+
#include "common/filesystem.hpp"
+#include "mon_command.hpp"
+#include "common.hpp"
+
#include <iostream>
+#include <signal.h>
+#include <thread>
#if defined(__GNUC__)
#include <sys/ioctl.h>
#endif
namespace hailort
{
-// TODO: Deal with longer networks names - should use HAILO_MAX_NETWORK_NAME_SIZE but its too long for one line
-constexpr size_t NETWORK_NAME_WIDTH = 40;
-constexpr size_t STREAM_NAME_WIDTH = 60;
-constexpr size_t ACTIVE_TIME_WIDTH = 25;
+constexpr size_t STRING_WIDTH = 60;
+constexpr size_t NETWORK_GROUP_NAME_WIDTH = STRING_WIDTH;
+constexpr size_t DEVICE_ID_WIDTH = STRING_WIDTH;
+constexpr size_t STREAM_NAME_WIDTH = STRING_WIDTH;
+constexpr size_t UTILIZATION_WIDTH = 25;
constexpr size_t NUMBER_WIDTH = 15;
constexpr size_t TERMINAL_DEFAULT_WIDTH = 80;
-constexpr size_t LINE_LENGTH = 125;
+constexpr size_t LINE_LENGTH = NETWORK_GROUP_NAME_WIDTH + STREAM_NAME_WIDTH + UTILIZATION_WIDTH + NUMBER_WIDTH;
constexpr std::chrono::milliseconds EPSILON_TIME(500);
+// Truncates original_str to at most max_length characters, replacing the
+// tail with "... " when truncation occurs; returns the string unchanged
+// when it already fits.
+// NOTE(review): assumes max_length >= ELLIPSIS.length(); a smaller value
+// would underflow the unsigned subtraction below - confirm callers always
+// pass the table column widths (>= 15 in this file).
+inline std::string truncate_str(const std::string &original_str, uint32_t max_length)
+{
+ static const std::string ELLIPSIS = "... ";
+ return (original_str.length() > max_length) ? original_str.substr(0, (max_length - ELLIPSIS.length())) + ELLIPSIS : original_str;
+}
+
MonCommand::MonCommand(CLI::App &parent_app) :
Command(parent_app.add_subcommand("monitor", "Monitor of networks - Presents information about the running networks. " \
"To enable monitor, set in the application process the environment variable '" + std::string(SCHEDULER_MON_ENV_VAR) + "' to 1."))
LOGGER__ERROR("hailortcli `monitor` command is not supported on Windows");
return HAILO_NOT_IMPLEMENTED;
#else
- return print_table();
+ return run_monitor();
#endif
}
-size_t MonCommand::print_networks_info_header()
+void MonCommand::print_devices_info_header()
{
std::cout <<
- std::setw(NETWORK_NAME_WIDTH) << std::left << "Network" <<
+ std::setw(DEVICE_ID_WIDTH) << std::left << "Device ID" <<
+ std::setw(UTILIZATION_WIDTH) << std::left << "Utilization (%)" <<
+ std::setw(STRING_WIDTH) << std::left << "Architecture" <<
+ "\n" << std::left << std::string(LINE_LENGTH, '-') << "\n";
+}
+
+// Prints one table row per device in mon_message: device id, utilization (%)
+// and architecture, padded to LINE_LENGTH so stale characters from the
+// previous refresh are overwritten.
+void MonCommand::print_devices_info_table(const ProtoMon &mon_message)
+{
+ auto data_line_len = NUMBER_WIDTH + NETWORK_GROUP_NAME_WIDTH + DEVICE_ID_WIDTH;
+ auto rest_line_len = LINE_LENGTH - data_line_len;
+
+ for (const auto &device_info : mon_message.device_infos()) {
+ auto device_id = device_info.device_id();
+ auto utilization = device_info.utilization();
+ auto device_arch = device_info.device_arch();
+
+ std::cout << std::setprecision(1) << std::fixed <<
+ std::setw(DEVICE_ID_WIDTH) << std::left << device_id <<
+ std::setw(UTILIZATION_WIDTH) << std::left << utilization <<
+ std::setw(STRING_WIDTH) << std::left << device_arch <<
+ std::string(rest_line_len, ' ') << "\n";
+ }
+}
+
+void MonCommand::print_networks_info_header()
+{
+ std::cout <<
+ std::setw(NETWORK_GROUP_NAME_WIDTH) << std::left << "Model" <<
+ std::setw(UTILIZATION_WIDTH) << std::left << "Utilization (%) " <<
std::setw(NUMBER_WIDTH) << std::left << "FPS" <<
- std::setw(ACTIVE_TIME_WIDTH) << std::left << "Active Time (%) " <<
std::setw(NUMBER_WIDTH) << std::left << "PID" <<
"\n" << std::left << std::string(LINE_LENGTH, '-') << "\n";
- static const uint32_t header_lines_count = 2;
-
- return header_lines_count;
}
-size_t MonCommand::print_networks_info_table(const ProtoMon &mon_message)
+void MonCommand::print_networks_info_table(const ProtoMon &mon_message)
{
const uint32_t NUMBER_OBJECTS_COUNT = 3;
- auto data_line_len = (NUMBER_WIDTH * NUMBER_OBJECTS_COUNT) + NETWORK_NAME_WIDTH;
+ auto data_line_len = (NUMBER_WIDTH * NUMBER_OBJECTS_COUNT) + NETWORK_GROUP_NAME_WIDTH;
auto rest_line_len = LINE_LENGTH - data_line_len;
const std::string &pid = mon_message.pid();
- for (auto net_info : mon_message.networks_infos()) {
- auto &net_name = net_info.network_name();
+ for (const auto &net_info : mon_message.networks_infos()) {
+ auto &original_net_name = net_info.network_name();
+ auto net_name = truncate_str(original_net_name, NETWORK_GROUP_NAME_WIDTH);
auto fps = net_info.fps();
- auto active_time = net_info.active_time();
+ auto utilization = net_info.utilization();
std::cout << std::setprecision(1) << std::fixed <<
- std::setw(NETWORK_NAME_WIDTH) << std::left << net_name <<
+ std::setw(STRING_WIDTH) << std::left << net_name <<
+ std::setw(UTILIZATION_WIDTH) << std::left << utilization <<
std::setw(NUMBER_WIDTH) << std::left << fps <<
- std::setw(ACTIVE_TIME_WIDTH) << std::left << active_time <<
std::setw(NUMBER_WIDTH) << std::left << pid << std::string(rest_line_len, ' ') << "\n";
}
-
- return mon_message.networks_infos().size();
}
-size_t MonCommand::print_frames_header()
+void MonCommand::print_frames_header()
{
std::cout <<
- std::setw(NETWORK_NAME_WIDTH) << std::left << "Network" <<
- std::setw(STREAM_NAME_WIDTH) << std::left << "Stream" <<
+ std::setw(STRING_WIDTH) << std::left << "Model" <<
+ std::setw(STRING_WIDTH) << std::left << "Stream" <<
std::setw(NUMBER_WIDTH) << std::left << "Direction" <<
std::setw(NUMBER_WIDTH) << std::left << "Frames" <<
"\n" << std::left << std::string(LINE_LENGTH, '-') << "\n";
- static const size_t header_lines_count = 2;
- return header_lines_count;
}
-size_t MonCommand::print_frames_table(const ProtoMon &mon_message)
+void MonCommand::print_frames_table(const ProtoMon &mon_message)
{
- size_t table_lines_count = 0;
- for (auto &net_info : mon_message.net_frames_infos()) {
- auto &net_name = net_info.network_name();
- table_lines_count += net_info.streams_frames_infos().size();
- for (auto &streams_frames : net_info.streams_frames_infos()) {
- auto &stream_name = streams_frames.stream_name();
+ for (const auto &net_info : mon_message.net_frames_infos()) {
+ auto &original_net_name = net_info.network_name();
+ auto net_name = truncate_str(original_net_name, NETWORK_GROUP_NAME_WIDTH);
+ for (const auto &streams_frames : net_info.streams_frames_infos()) {
+ auto &stream_name_original = streams_frames.stream_name();
+ auto stream_name = truncate_str(stream_name_original, STREAM_NAME_WIDTH);
auto stream_direction = (streams_frames.stream_direction() == PROTO__STREAM_DIRECTION__HOST_TO_DEVICE) ? "H2D" : "D2H";
std::string frames;
}
std::cout <<
- std::setw(NETWORK_NAME_WIDTH) << std::left << net_name <<
- std::setw(STREAM_NAME_WIDTH) << std::left << stream_name <<
+ std::setw(STRING_WIDTH) << std::left << net_name <<
+ std::setw(STRING_WIDTH) << std::left << stream_name <<
std::setw(NUMBER_WIDTH) << std::left << stream_direction <<
std::setw(NUMBER_WIDTH) << std::left << frames << "\n";
}
}
-
- return table_lines_count;
}
#if defined(__GNUC__)
return terminal_line_width;
}
-hailo_status MonCommand::print_table()
+void MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width)
{
+ print_devices_info_header();
+ for (const auto &mon_message : mon_messages) {
+ print_devices_info_table(mon_message);
+ }
+
+ std::cout << std::string(terminal_line_width, ' ') << "\n";
+ std::cout << std::string(terminal_line_width, ' ') << "\n";
+
+ print_networks_info_header();
+
+ for (const auto &mon_message : mon_messages) {
+ print_networks_info_table(mon_message);
+ }
+
+ std::cout << std::string(terminal_line_width, ' ') << "\n";
+ std::cout << std::string(terminal_line_width, ' ') << "\n";
+
+ print_frames_header();
+ for (const auto &mon_message : mon_messages) {
+ print_frames_table(mon_message);
+ }
+}
+
+// SIGINT (Ctrl-C) flag polled by run_monitor()'s main loop.
+// volatile sig_atomic_t is the only object type the C/C++ standards guarantee
+// may be written from a signal handler; volatile bool is not portable.
+static volatile sig_atomic_t keep_running = 1;
+// Note: the (typo'd) handler name is kept as-is since run_monitor() registers it.
+void signit_handler(int /*dummy*/)
+{
+ keep_running = 0;
+}
+
+hailo_status MonCommand::run_monitor()
+{
+ // Note: There is no need to restore the previous SIGINT handler since we finish running after it is called.
+ signal(SIGINT, signit_handler);
+
std::chrono::milliseconds time_interval = DEFAULT_SCHEDULER_MON_INTERVAL + EPSILON_TIME;
auto terminal_line_width_expected = get_terminal_line_width();
CHECK_EXPECTED_AS_STATUS(terminal_line_width_expected);
auto terminal_line_width = terminal_line_width_expected.release();
- size_t last_run_total_lines_count = 0;
- bool data_was_printed = false;
- while (true) {
- size_t total_lines_count = 0;
+ AlternativeTerminal alt_terminal;
+ while (keep_running) {
bool print_warning_msg = true; // Will change to false only if mon directory is valid and there are updated files in it.
auto mon_dir_valid = Filesystem::is_directory(SCHEDULER_MON_TMP_DIR);
auto file = LockedFile::create(mon_file, "r");
if (HAILO_SUCCESS != file.status()) {
LOGGER__ERROR("Failed to open and lock file {}, with status: {}", mon_file, file.status());
- total_lines_count++;
continue;
}
ProtoMon mon_message;
if (!mon_message.ParseFromFileDescriptor(file->get_fd())) {
LOGGER__WARNING("Failed to ParseFromFileDescriptor monitor file {} with errno {}", mon_file, errno);
- total_lines_count++;
continue;
}
}
}
- total_lines_count += print_networks_info_header();
- for (auto &mon_message : mon_messages) {
- total_lines_count += print_networks_info_table(mon_message);
- }
-
- std::cout << std::string(terminal_line_width, ' ') << "\n";
- std::cout << std::string(terminal_line_width, ' ') << "\n";
- total_lines_count += 2;
-
- total_lines_count += print_frames_header();
- for (auto &mon_message : mon_messages) {
- total_lines_count += print_frames_table(mon_message);
- }
-
+ print_tables(mon_messages, terminal_line_width);
if (print_warning_msg) {
- std::cout << "Monitor did not retrieve any files. This occurs when there is no application currently running. If this is not the case, verify that environment variable '" <<
- SCHEDULER_MON_ENV_VAR << "' is set to 1.\n";
- total_lines_count++;
-
- if (data_was_printed) {
- auto lines_to_clear = last_run_total_lines_count - total_lines_count;
- CliCommon::clear_lines_down(lines_to_clear);
- total_lines_count += lines_to_clear;
- data_was_printed = false;
- }
- }
- else {
- data_was_printed = true;
- last_run_total_lines_count = total_lines_count;
+ std::cout << FORMAT_GREEN_PRINT << "Monitor did not retrieve any files. This occurs when there is no application currently running.\n"
+ << "If this is not the case, verify that environment variable '" << SCHEDULER_MON_ENV_VAR << "' is set to 1.\n" << FORMAT_NORMAL_PRINT;
}
- CliCommon::reset_cursor(total_lines_count);
+ CliCommon::clear_terminal();
std::this_thread::sleep_for(DEFAULT_SCHEDULER_MON_INTERVAL);
}
-
+
return HAILO_SUCCESS;
}
#endif
#define _HAILO_MON_COMMAND_HPP_
#include "hailo/hailort.h"
+
#include "hailortcli.hpp"
#include "command.hpp"
-#include "scheduler_mon.hpp"
+#include "vdevice/scheduler/scheduler_mon.hpp"
#include "CLI/CLI.hpp"
virtual hailo_status execute() override;
private:
- hailo_status print_table();
- size_t print_networks_info_header();
- size_t print_frames_header();
- size_t print_networks_info_table(const ProtoMon &mon_message);
- size_t print_frames_table(const ProtoMon &mon_message);
+ hailo_status run_monitor();
+ void print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width);
+ void print_devices_info_header();
+ void print_networks_info_header();
+ void print_frames_header();
+ void print_devices_info_table(const ProtoMon &mon_message);
+ void print_networks_info_table(const ProtoMon &mon_message);
+ void print_frames_table(const ProtoMon &mon_message);
+ hailo_status run_in_alternative_terminal();
};
} /* namespace hailort */
#include "common/filesystem.hpp"
#include "hailo/hailort_common.hpp"
-#define TAB (" ")
-
-static std::string add_tabs(uint8_t count)
-{
- // Each TAB counts as 4 spaces
- std::string res = "";
- for (uint8_t i = 0; i < count; i++) {
- res = res + TAB;
- }
- return res;
-}
-
-static std::string get_shape_str(const hailo_stream_info_t &stream_info)
-{
- switch (stream_info.format.order)
- {
- case HAILO_FORMAT_ORDER_HAILO_NMS:
- return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
- "(number of classes: " + std::to_string(stream_info.nms_info.number_of_classes) +
- ", max_bboxes_per_class: "+ std::to_string(stream_info.nms_info.max_bboxes_per_class) + ")";
- case HAILO_FORMAT_ORDER_NC:
- return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
- "(" + std::to_string(stream_info.hw_shape.features) + ")";
- case HAILO_FORMAT_ORDER_NHW:
- return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
- "(" + std::to_string(stream_info.hw_shape.height) + "x" + std::to_string(stream_info.hw_shape.width) + ")";
- default:
- return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
- "(" + std::to_string(stream_info.hw_shape.height) + "x" + std::to_string(stream_info.hw_shape.width) +
- "x" + std::to_string(stream_info.hw_shape.features) + ")";
- }
-}
-
-static std::string get_shape_str(const hailo_vstream_info_t &vstream_info)
-{
- switch (vstream_info.format.order)
- {
- case HAILO_FORMAT_ORDER_HAILO_NMS:
- return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
- "(number of classes: " + std::to_string(vstream_info.nms_shape.number_of_classes) +
- ", max_bboxes_per_class: " + std::to_string(vstream_info.nms_shape.max_bboxes_per_class) + ")";
- case HAILO_FORMAT_ORDER_NC:
- return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
- "(" + std::to_string(vstream_info.shape.features) + ")";
- case HAILO_FORMAT_ORDER_NHW:
- return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
- "(" +std::to_string(vstream_info.shape.height) + "x" + std::to_string(vstream_info.shape.width) + ")";
- default:
- return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
- "(" + std::to_string(vstream_info.shape.height) + "x" + std::to_string(vstream_info.shape.width) + "x" +
- std::to_string(vstream_info.shape.features) + ")";
- }
-}
-
ParseHefCommand::ParseHefCommand(CLI::App &parent_app) :
Command(parent_app.add_subcommand("parse-hef", "Parse HEF to get information about its components"))
{
CHECK_EXPECTED_AS_STATUS(hef_exp, "Failed to parse HEF");
auto hef = hef_exp.release();
- auto network_group_infos = hef.get_network_groups_infos();
- CHECK_EXPECTED_AS_STATUS(network_group_infos);
- for (auto &network_group_info : network_group_infos.release()) {
- auto contexts_str = (network_group_info.is_multi_context ? "Multi Context" : "Single Context");
- std::cout << "Network group name: " << network_group_info.name << " (" << contexts_str << ")" << std::endl;
- auto network_infos = hef.get_network_infos(network_group_info.name);
- CHECK_EXPECTED_AS_STATUS(network_infos, "Failed to parse networks infos");
- for (auto &network_info : network_infos.value()) {
- std::cout << add_tabs(1) << "Network name: " << network_info.name << std::endl;
- if (stream_infos) {
- std::cout << add_tabs(2) << "Stream infos:" << std::endl;
- auto input_stream_infos = hef.get_input_stream_infos(network_info.name);
- CHECK_EXPECTED_AS_STATUS(input_stream_infos, "Failed to parse input stream infos");
- for (auto &stream_info : input_stream_infos.value()) {
- auto shape_str = get_shape_str(stream_info);
- std::cout << add_tabs(3) << "Input " << stream_info.name << " " << shape_str << std::endl;
- }
- auto output_stream_infos = hef.get_output_stream_infos(network_info.name);
- CHECK_EXPECTED_AS_STATUS(output_stream_infos, "Failed to parse output stream infos");
- for (auto &stream_info : output_stream_infos.value()) {
- auto shape_str = get_shape_str(stream_info);
- std::cout << add_tabs(3) << "Output " << stream_info.name << " " << shape_str << std::endl;
- }
- }
- if (vstream_infos) {
- std::cout << add_tabs(2) << "VStream infos:" << std::endl;
- auto input_vstream_infos = hef.get_input_vstream_infos(network_info.name);
- CHECK_EXPECTED_AS_STATUS(input_vstream_infos, "Failed to parse input vstream infos");
- for (auto &vstream_info : input_vstream_infos.value()) {
- auto shape_str = get_shape_str(vstream_info);
- std::cout << add_tabs(3) << "Input " << vstream_info.name << " " << shape_str << std::endl;
- }
- auto output_vstream_infos = hef.get_output_vstream_infos(network_info.name);
- CHECK_EXPECTED_AS_STATUS(output_vstream_infos, "Failed to parse output vstream infos");
- for (auto &vstream_info : output_vstream_infos.value()) {
- auto shape_str = get_shape_str(vstream_info);
- std::cout << add_tabs(3) << "Output " << vstream_info.name << " " << shape_str << std::endl;
- }
- }
- }
- }
- std::cout << std::endl;
+ auto hef_info = hef.get_hef_description(stream_infos, vstream_infos);
+ CHECK_EXPECTED_AS_STATUS(hef_info, "Failed to parse HEF");
+ std::cout << hef_info.release();
return HAILO_SUCCESS;
}
#include "live_printer.hpp"
#include "../common.hpp"
+#include "common/os_utils.hpp"
+#include "common/utils.hpp"
#include <sstream>
#include <iostream>
+
using namespace hailort;
LivePrinter::LivePrinter(std::chrono::milliseconds interval) :
m_interval(interval),
m_stop_event(Event::create_shared(Event::State::not_signalled)),
m_tracks(),
- m_mutex()
+ m_mutex(),
+ m_prev_count(0),
+ m_enable_ansi_escape_sequences(CursorAdjustment())
{
}
if (m_thread.joinable()) {
m_thread.join();
}
- print(false);
+ print();
}
+// Registers a track under the given level. Levels are stored in a std::map,
+// so print() emits tracks in ascending level order (lower levels first).
-void LivePrinter::add(std::shared_ptr<Track> track)
+void LivePrinter::add(std::shared_ptr<Track> track, uint8_t level)
{
    std::unique_lock<std::mutex> lock(m_mutex);
+    // NOTE(review): operator[] below already default-constructs a missing
+    // entry, so this contains() check is redundant (harmless).
+    if (!contains(m_tracks, level)) {
+        m_tracks[level] = {};
+    }
+    m_tracks[level].emplace_back(track);
}
+// Collects the text of all tracks (in level order) into one buffer, rewinds
+// the cursor over the previous frame's lines, and prints the new frame.
+// Tracks are queried under the lock; the actual terminal I/O happens outside
+// it to keep the critical section short.
-void LivePrinter::print(bool reset)
+void LivePrinter::print()
{
    std::stringstream ss;
    uint32_t count = 0;
    {
        std::unique_lock<std::mutex> lock(m_mutex);
-        for (auto &track : m_tracks) {
-            count += track->get_text(ss);
+        for (auto &level_pair : m_tracks) {
+            for (auto &track : level_pair.second) {
+                count += track->get_text(ss);
+            }
        }
    }
-
+    CliCommon::reset_cursor(m_prev_count);
+    // On the first print m_prev_count = 0, so no lines will be deleted
    std::cout << ss.str() << std::flush;
-    if (reset) {
-        CliCommon::reset_cursor(count);
-        //TODO: what aout leftovers from prev line?
-    }
+    m_prev_count = count;
}
+// Starts every registered track, then launches the background thread that
+// re-prints the live view each m_interval until m_stop_event is signalled.
+// Returns the first failing track's status; the thread is not started on failure.
+// NOTE(review): m_tracks is iterated here without holding m_mutex - safe only
+// if all add() calls happen before start(); confirm against callers.
-void LivePrinter::start()
+hailo_status LivePrinter::start()
{
+    for (auto &level_pair : m_tracks) {
+        for (auto &track : level_pair.second) {
+            CHECK_SUCCESS(track->start());
+        }
+    }
+
    m_thread = std::thread([this] () {
+        OsUtils::set_current_thread_name("LIVE_PRINTER");
        while (true) {
-            print(true);
+            print();
+            // wait() doubles as the refresh timer: HAILO_TIMEOUT means
+            // "interval elapsed, keep printing"; anything else means stop.
            auto status = m_stop_event->wait(m_interval);
            if (HAILO_TIMEOUT != status) {
                break;
            }
        }
    });
-}
\ No newline at end of file
+
+    return HAILO_SUCCESS;
+}
#ifndef _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_
#define _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_
+#include "common/os_utils.hpp"
#include "hailo/event.hpp"
#include <stdint.h>
#include <chrono>
#include <mutex>
#include <thread>
#include <atomic>
+#include <map>
class LivePrinter final
{
class Track
{
public:
+ Track() : m_started(false)
+ {}
+
+ virtual hailo_status start() = 0;
virtual uint32_t get_text(std::stringstream &ss) = 0;
+
+ protected:
+ bool m_started;
};
LivePrinter(std::chrono::milliseconds interval);
~LivePrinter();
- void add(std::shared_ptr<Track> track);
- void print(bool reset);
- void start();
+ void add(std::shared_ptr<Track> track, uint8_t level); // prints tracks in consecutive order from low-to-high levels
+ void print();
+ hailo_status start();
private:
std::chrono::milliseconds m_interval;
hailort::EventPtr m_stop_event;
- std::vector<std::shared_ptr<Track>> m_tracks;
+ std::map<uint8_t, std::vector<std::shared_ptr<Track>>> m_tracks;
std::thread m_thread;
std::mutex m_mutex;
+ uint32_t m_prev_count;
+ hailort::CursorAdjustment m_enable_ansi_escape_sequences;
};
#endif /* _HAILO_HAILORTCLI_RUN2_LIVE_PRINTER_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file measurement_live_track.cpp
+ * @brief Device measurements live track
+ **/
+
+#include "hailo/hailort.h"
+
+#include "common/device_measurements.hpp"
+#include "common/utils.hpp"
+
+#include "measurement_live_track.hpp"
+
+#include <spdlog/fmt/fmt.h>
+#include <sstream>
+
+
+using namespace hailort;
+
+// Factory for a device-measurements live track. Each `measure_*` flag selects
+// an optional measurement object; a flag left false leaves the corresponding
+// member null, and get_text() simply skips null measurements.
+// Returns HAILO_OUT_OF_HOST_MEMORY if the track itself cannot be allocated,
+// or the failure status of any measurement object that could not be created.
+Expected<std::shared_ptr<MeasurementLiveTrack>> MeasurementLiveTrack::create_shared(Device &device, bool measure_power, bool measure_current,
+    bool measure_temp)
+{
+    std::shared_ptr<PowerMeasurement> power_measurement = nullptr;
+    if (measure_power) {
+        auto power_measurement_exp = PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__POWER);
+        CHECK_EXPECTED(power_measurement_exp);
+        power_measurement = power_measurement_exp.release();
+    }
+
+    // Current is measured via the same PowerMeasurement class, with a
+    // different measurement type.
+    std::shared_ptr<PowerMeasurement> current_measurement = nullptr;
+    if (measure_current) {
+        auto current_measurement_exp = PowerMeasurement::create_shared(device, HAILO_POWER_MEASUREMENT_TYPES__CURRENT);
+        CHECK_EXPECTED(current_measurement_exp);
+        current_measurement = current_measurement_exp.release();
+    }
+
+    std::shared_ptr<TemperatureMeasurement> temp_measurement = nullptr;
+    if (measure_temp) {
+        auto temp_measurement_exp = TemperatureMeasurement::create_shared(device);
+        CHECK_EXPECTED(temp_measurement_exp);
+        temp_measurement = temp_measurement_exp.release();
+    }
+
+    auto ptr = make_shared_nothrow<MeasurementLiveTrack>(power_measurement, current_measurement, temp_measurement, device.get_dev_id());
+    CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return ptr;
+}
+
+// Ctor: takes ownership of the (possibly null) measurement objects and the
+// device id used as the section title in get_text(). Prefer create_shared().
+MeasurementLiveTrack::MeasurementLiveTrack(std::shared_ptr<PowerMeasurement> power_measurement,
+    std::shared_ptr<PowerMeasurement> current_measurement, std::shared_ptr<TemperatureMeasurement> temp_measurement,
+    const std::string &device_id) :
+    LivePrinter::Track(), m_power_measurement(std::move(power_measurement)), m_current_measurement(std::move(current_measurement)),
+    m_temp_measurement(std::move(temp_measurement)), m_device_id(device_id)
+{}
+
+// Starts every configured (non-null) measurement. m_started is set only after
+// all of them start successfully; on early failure the flag stays false and
+// get_text() keeps returning 0 (no output for this track).
+hailo_status MeasurementLiveTrack::start()
+{
+    if (m_power_measurement) {
+        CHECK_SUCCESS(m_power_measurement->start_measurement());
+    }
+
+    if (m_current_measurement) {
+        CHECK_SUCCESS(m_current_measurement->start_measurement());
+    }
+
+    if (m_temp_measurement) {
+        CHECK_SUCCESS(m_temp_measurement->start_measurement());
+    }
+
+    m_started = true;
+
+    return HAILO_SUCCESS;
+}
+
+// Renders min/average/max rows for each enabled measurement (power, current,
+// temperature) into `ss` and returns the number of terminal rows written, so
+// LivePrinter knows how many lines to rewind before the next refresh.
+// Returns 0 until start() succeeds. Rows whose statistic is not yet available
+// (min()/mean()/max() empty) are omitted and not counted.
+uint32_t MeasurementLiveTrack::get_text(std::stringstream &ss)
+{
+    if (!m_started) {
+        return 0;
+    }
+
+    // NOTE(review): deduced as int; implicitly converted to uint32_t on return.
+    auto rows_count = 0;
+
+    if (m_power_measurement || m_current_measurement || m_temp_measurement) {
+        // Leading '\n' plus the title line => 2 rows.
+        ss << fmt::format("\nMeasurements for device {}\n", m_device_id);
+        rows_count += 2;
+    }
+
+    // Power section
+    if (m_power_measurement) {
+        auto measurement_info = m_power_measurement->get_data();
+        if (auto min = measurement_info.min()) {
+            ss << fmt::format("\tMinimum power consumption: {:.2f} {}\n", *min, m_power_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto mean = measurement_info.mean()) {
+            ss << fmt::format("\tAverage power consumption: {:.2f} {}\n", *mean, m_power_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto max = measurement_info.max()) {
+            ss << fmt::format("\tMaximum power consumption: {:.2f} {}\n", *max, m_power_measurement->measurement_unit());
+            rows_count++;
+        }
+    }
+
+    // Current section
+    if (m_current_measurement) {
+        auto measurement_info = m_current_measurement->get_data();
+        if (auto min = measurement_info.min()) {
+            ss << fmt::format("\tMinimum current consumption: {:.2f} {}\n", *min, m_current_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto mean = measurement_info.mean()) {
+            ss << fmt::format("\tAverage current consumption: {:.2f} {}\n", *mean, m_current_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto max = measurement_info.max()) {
+            ss << fmt::format("\tMaximum current consumption: {:.2f} {}\n", *max, m_current_measurement->measurement_unit());
+            rows_count++;
+        }
+    }
+
+    // Temperature section
+    if (m_temp_measurement) {
+        auto measurement_info = m_temp_measurement->get_data();
+        if (auto min = measurement_info.min()) {
+            ss << fmt::format("\tMinimum chip temperature: {:.2f} {}\n", *min, m_temp_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto mean = measurement_info.mean()) {
+            ss << fmt::format("\tAverage chip temperature: {:.2f} {}\n", *mean, m_temp_measurement->measurement_unit());
+            rows_count++;
+        }
+        if (auto max = measurement_info.max()) {
+            ss << fmt::format("\tMaximum chip temperature: {:.2f} {}\n", *max, m_temp_measurement->measurement_unit());
+            rows_count++;
+        }
+    }
+
+    return rows_count;
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file measurement_live_track.hpp
+ * @brief Device measurements live track
+ **/
+
+#ifndef _HAILO_HAILORTCLI_RUN2_MEASUREMENT_LIVE_TRACK_HPP_
+#define _HAILO_HAILORTCLI_RUN2_MEASUREMENT_LIVE_TRACK_HPP_
+
+#include "hailo/hailort.h"
+
+#include "common/device_measurements.hpp"
+
+#include "live_printer.hpp"
+
+
+// Live-printer track that reports power / current / temperature statistics
+// for a single device. Any subset of the three measurements may be enabled;
+// disabled ones are held as null pointers and skipped when printing.
+class MeasurementLiveTrack : public LivePrinter::Track
+{
+public:
+    // NOTE(review): the parameter is named `vdevice` but is typed
+    // hailort::Device and the .cpp implementation names it `device` -
+    // consider renaming for consistency.
+    static hailort::Expected<std::shared_ptr<MeasurementLiveTrack>> create_shared(hailort::Device &vdevice, bool measure_power,
+        bool measure_current, bool measure_temp);
+
+    virtual ~MeasurementLiveTrack() = default;
+    // Starts all configured measurements; the track prints nothing until this succeeds.
+    virtual hailo_status start() override;
+    // Appends the measurement rows to ss; returns the number of rows written.
+    virtual uint32_t get_text(std::stringstream &ss) override;
+
+    // Public so create_shared() can use make_shared_nothrow; prefer create_shared().
+    MeasurementLiveTrack(std::shared_ptr<PowerMeasurement> power_measurement, std::shared_ptr<PowerMeasurement> current_measurement,
+        std::shared_ptr<TemperatureMeasurement> temp_measurement, const std::string &device_id);
+
+private:
+    // Null when the corresponding measurement is disabled.
+    std::shared_ptr<PowerMeasurement> m_power_measurement;
+    std::shared_ptr<PowerMeasurement> m_current_measurement;
+    std::shared_ptr<TemperatureMeasurement> m_temp_measurement;
+
+    // Shown as the section title in get_text().
+    std::string m_device_id;
+};
+
+#endif /* _HAILO_HAILORTCLI_RUN2_MEASUREMENT_LIVE_TRACK_HPP_ */
\ No newline at end of file
**/
#include "network_live_track.hpp"
+#include "../infer_stats_printer.hpp"
+
#include <spdlog/fmt/fmt.h>
#include <sstream>
-NetworkLiveTrack::NetworkLiveTrack(const std::string &name) :
- m_name(name), m_count(0), m_last_get_time(std::chrono::steady_clock::now())
+NetworkLiveTrack::NetworkLiveTrack(const std::string &name, std::shared_ptr<ConfiguredNetworkGroup> cng, LatencyMeterPtr overall_latency_meter) :
+ m_name(name), m_count(0), m_last_get_time(), m_cng(cng), m_overall_latency_meter(overall_latency_meter)
{
}
+// Arms the track: resets the frame counter and records the FPS baseline
+// timestamp, so the rate shown by get_text() covers only the inference
+// period (not setup time). Always succeeds.
+hailo_status NetworkLiveTrack::start()
+{
+    m_last_get_time = std::chrono::steady_clock::now();
+    m_count = 0;
+    m_started = true;
+
+    return HAILO_SUCCESS;
+}
+
// Writes a two-line status for this network (name, then FPS plus optional HW
// and overall latency) into ss; returns the number of rows written (0 before
// start(), otherwise 2).
uint32_t NetworkLiveTrack::get_text(std::stringstream &ss)
{
+    if (!m_started) {
+        return 0;
+    }
+
+    // Average FPS since start(): total frames / elapsed seconds.
    auto elapsed_time = std::chrono::steady_clock::now() - m_last_get_time;
    auto count = m_count.load();
    auto fps = count / std::chrono::duration<double>(elapsed_time).count();
-    ss << fmt::format("{} - fps: {:.2f}\n", m_name, fps);
+    ss << fmt::format("{}:\n\t| fps: {:.2f}", m_name, fps);
+
+    auto hw_latency_measurement = m_cng->get_latency_measurement();
+    if (hw_latency_measurement) {
+        ss << fmt::format(" | hw latency: {:.2f} ms", InferResultsFormatUtils::latency_result_to_ms(hw_latency_measurement->avg_hw_latency));
+    }
+    else if (HAILO_NOT_AVAILABLE != hw_latency_measurement.status()) { // HAILO_NOT_AVAILABLE is a valid error, we ignore it
+        ss << fmt::format(" | hw latency: failed with status={}", hw_latency_measurement.status());
+    }
-    return 1;
+    if (m_overall_latency_meter) {
+        auto overall_latency_measurement = m_overall_latency_meter->get_latency(true);
+        if (overall_latency_measurement) {
+            ss << fmt::format(" | overall latency: {:.2f} ms", InferResultsFormatUtils::latency_result_to_ms(*overall_latency_measurement));
+        }
+        else if (HAILO_NOT_AVAILABLE != overall_latency_measurement.status()) { // HAILO_NOT_AVAILABLE is a valid error, we ignore it
+            ss << fmt::format(" | overall latency: failed with status={}", overall_latency_measurement.status());
+        }
+    }
+    ss << "\n";
+
+    // Name line + stats line.
+    return 2;
}
// Counts one completed frame toward the FPS calculation; called from the
// output-vstream thread. Frames arriving before start() are ignored so the
// rate baseline stays accurate. m_count is atomic, so this is thread-safe.
void NetworkLiveTrack::progress()
{
+    if (!m_started) {
+        return;
+    }
+
    m_count++;
}
\ No newline at end of file
* @brief Network live track
**/
-#include "live_printer.hpp"
-
#ifndef _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_
#define _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_
+#include "hailo/hailort.h"
+#include "hailo/network_group.hpp"
+
+#include "common/latency_meter.hpp"
+
+#include "live_printer.hpp"
+
+
class NetworkLiveTrack : public LivePrinter::Track
{
public:
- NetworkLiveTrack(const std::string &name);
+ NetworkLiveTrack(const std::string &name, std::shared_ptr<hailort::ConfiguredNetworkGroup> cng, hailort::LatencyMeterPtr overall_latency_meter);
virtual ~NetworkLiveTrack() = default;
- uint32_t get_text(std::stringstream &ss);
+ virtual hailo_status start() override;
+ virtual uint32_t get_text(std::stringstream &ss) override;
void progress();
private:
std::string m_name;
std::atomic<uint32_t> m_count;
std::chrono::time_point<std::chrono::steady_clock> m_last_get_time;
+ std::shared_ptr<hailort::ConfiguredNetworkGroup> m_cng;
+ hailort::LatencyMeterPtr m_overall_latency_meter;
};
#endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_ */
\ No newline at end of file
* @brief Run network on hailo device
**/
-#include "network_runner.hpp"
+#include "hailo/hailort.h"
+#include "hailo/hailort_common.hpp"
+#include "hailo/hailort_defaults.hpp"
+
#include "common/async_thread.hpp"
-#include "hailort_defaults.hpp" //TODO: not API
+#include "common/file_utils.hpp"
+#include "common/latency_meter.hpp"
+
+#include "network_runner.hpp"
+
using namespace hailort;
+
+// RAII guard that signals the given event when it goes out of scope. Used by
+// the vstream worker threads so that when any one of them exits (for any
+// reason), the shared shutdown event is raised and its siblings stop too.
+// NOTE(review): holds a reference - the Event must outlive the guard. The
+// ctor could be `explicit` and copying could be deleted; confirm intent.
+class SignalEventScopeGuard final
+{
+public:
+    SignalEventScopeGuard(Event &event) : m_event(event)
+    {}
+
+    ~SignalEventScopeGuard()
+    {
+        m_event.signal();
+    }
+
+    Event &m_event;
+};
+
+
//TODO: duplicated
static hailo_status wait_for_threads(std::vector<AsyncThreadPtr<hailo_status>> &threads)
{
{
}
+// Default-constructs NetworkParams: round-robin scheduling, default batch
+// size, no scheduler threshold/timeout, unlimited framerate and no latency
+// measurements.
+NetworkParams::NetworkParams() : hef_path(), net_group_name(), vstream_params(), scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
+    batch_size(HAILO_DEFAULT_BATCH_SIZE), scheduler_threshold(0), scheduler_timeout_ms(0),
+    // Bug fix: scheduler_priority was left uninitialized, yet it is passed to
+    // set_scheduler_priority() when the scheduler is enabled (indeterminate
+    // read). 0 is a deterministic default - TODO confirm against the
+    // library's normal-priority constant.
+    scheduler_priority(0),
+    framerate(UNLIMITED_FRAMERATE), measure_hw_latency(false),
+    measure_overall_latency(false)
+{
+}
+
NetworkRunner::NetworkRunner(const NetworkParams ¶ms, const std::string &name,
- std::vector<InputVStream> &&input_vstreams, std::vector<OutputVStream> &&output_vstreams)
+ std::vector<InputVStream> &&input_vstreams, std::vector<OutputVStream> &&output_vstreams,
+ std::shared_ptr<ConfiguredNetworkGroup> cng, LatencyMeterPtr overall_latency_meter)
: m_params(params), m_name(name), m_input_vstreams(std::move(input_vstreams)),
- m_output_vstreams(std::move(output_vstreams))
+ m_output_vstreams(std::move(output_vstreams)), m_cng(cng), m_overall_latency_meter(overall_latency_meter)
{
}
net_group_name = net_groups_names[0];
}
- auto interface = vdevice.get_default_streams_interface();
- CHECK_EXPECTED(interface, "Failed to get default streams interface");
-
- auto cfg_params = hef->create_configure_params(*interface, net_group_name);
+ auto cfg_params = vdevice.create_configure_params(hef.value(), net_group_name);
CHECK_EXPECTED(cfg_params);
cfg_params->batch_size = params.batch_size;
+ if (params.measure_hw_latency) {
+ cfg_params->latency |= HAILO_LATENCY_MEASURE;
+ }
auto cfgr_net_groups = vdevice.configure(hef.value(), {{net_group_name, cfg_params.value()}});
CHECK_EXPECTED(cfgr_net_groups);
assert(1 == cfgr_net_groups->size());
auto cfgr_net_group = cfgr_net_groups.value()[0];
- CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_threshold(params.scheduler_threshold));
- CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_timeout(std::chrono::milliseconds(params.scheduler_timeout_ms)));
+ if (HAILO_SCHEDULING_ALGORITHM_NONE!= params.scheduling_algorithm) {
+ CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_threshold(params.scheduler_threshold));
+ CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_timeout(std::chrono::milliseconds(params.scheduler_timeout_ms)));
+ CHECK_SUCCESS_AS_EXPECTED(cfgr_net_group->set_scheduler_priority(params.scheduler_priority));
+ }
std::map<std::string, hailo_vstream_params_t> vstreams_params;
for (auto &vstream_params : params.vstream_params) {
auto vstreams = create_vstreams(*cfgr_net_group, vstreams_params);
CHECK_EXPECTED(vstreams);
- auto net_runner = make_shared_nothrow<NetworkRunner>(params, net_group_name, std::move(vstreams->first), std::move(vstreams->second));
+ LatencyMeterPtr overall_latency_meter = nullptr;
+ if (params.measure_overall_latency) {
+ CHECK_AS_EXPECTED((1 == vstreams->first.size()), HAILO_INVALID_OPERATION,
+ "Overall latency measurement over multiple inputs network is not supported");
+
+ std::set<std::string> output_names;
+ for (auto &output_vstream : vstreams->second) {
+ output_names.insert(output_vstream.name());
+ }
+
+ overall_latency_meter = make_shared_nothrow<LatencyMeter>(output_names, OVERALL_LATENCY_TIMESTAMPS_LIST_LENGTH);
+ CHECK_NOT_NULL_AS_EXPECTED(overall_latency_meter, HAILO_OUT_OF_HOST_MEMORY);
+ }
+ auto net_runner = make_shared_nothrow<NetworkRunner>(params, net_group_name, std::move(vstreams->first),
+ std::move(vstreams->second), cfgr_net_group, overall_latency_meter);
CHECK_NOT_NULL_AS_EXPECTED(net_runner, HAILO_OUT_OF_HOST_MEMORY);
return net_runner;
}
-hailo_status NetworkRunner::run_input_vstream(InputVStream &vstream)
+// Loads an input dataset from a binary file. The file must contain a whole
+// number of frames for the given vstream (size % frame_size == 0); otherwise
+// HAILO_INVALID_ARGUMENT is returned. The sender thread cycles through the
+// buffer one frame_size slice at a time.
+// Returns HAILO_OUT_OF_HOST_MEMORY if the shared buffer cannot be allocated.
+Expected<BufferPtr> NetworkRunner::create_dataset_from_input_file(const std::string &file_path,
+    const InputVStream &input_vstream)
{
-    auto dataset = Buffer::create(vstream.get_frame_size(), 0xAB);
-    CHECK_EXPECTED_AS_STATUS(dataset);
+    auto buffer = read_binary_file(file_path);
+    CHECK_EXPECTED(buffer);
+    CHECK_AS_EXPECTED(0 == (buffer->size() % input_vstream.get_frame_size()), HAILO_INVALID_ARGUMENT,
+        "Input file ({}) size {} must be a multiple of the frame size {} ({})",
+        file_path, buffer->size(), input_vstream.get_frame_size(), input_vstream.name());
+
+    auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
+    CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return buffer_ptr;
+}
+
+
+// Builds the fallback dataset when no input file is supplied: a single frame
+// filled with the constant byte 0xAB (arbitrary pattern; the scheduler/FPS
+// measurements do not depend on the data content).
+Expected<BufferPtr> NetworkRunner::create_constant_dataset(const InputVStream &input_vstream)
+{
+    const uint8_t const_byte = 0xAB;
+    auto constant_buffer = Buffer::create_shared(input_vstream.get_frame_size(), const_byte);
+    CHECK_EXPECTED(constant_buffer);
+
+    return constant_buffer.release();
+}
+
+hailo_status NetworkRunner::run_input_vstream(InputVStream &vstream, Event &shutdown_event, BufferPtr dataset,
+ LatencyMeterPtr overall_latency_meter)
+{
+ auto signal_event_scope_guard = SignalEventScopeGuard(shutdown_event);
+
auto last_write_time = std::chrono::steady_clock::now();
auto framerate_interval = std::chrono::duration<double>(1) / m_params.framerate;
+ size_t buffer_offset = 0;
while(true) {
- auto status = vstream.write(MemoryView(dataset.value()));
+ if (overall_latency_meter) {
+ overall_latency_meter->add_start_sample(std::chrono::steady_clock::now().time_since_epoch());
+ }
+ auto status = vstream.write(MemoryView((dataset->data() + buffer_offset), vstream.get_frame_size()));
if (status == HAILO_STREAM_ABORTED_BY_USER) {
return status;
}
CHECK_SUCCESS(status);
+ buffer_offset += vstream.get_frame_size();
+ buffer_offset %= dataset->size();
if (m_params.framerate != UNLIMITED_FRAMERATE) {
auto elapsed_time = std::chrono::steady_clock::now() - last_write_time;
return HAILO_SUCCESS;
}
-hailo_status NetworkRunner::run_output_vstream(OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track)
+hailo_status NetworkRunner::run_output_vstream(OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track,
+ Event &shutdown_event, LatencyMeterPtr overall_latency_meter)
{
+ auto signal_event_scope_guard = SignalEventScopeGuard(shutdown_event);
+
auto result = Buffer::create(vstream.get_frame_size());
CHECK_EXPECTED_AS_STATUS(result);
while(true) {
return status;
}
CHECK_SUCCESS(status);
+ if (overall_latency_meter) {
+ overall_latency_meter->add_end_sample(vstream.name(), std::chrono::steady_clock::now().time_since_epoch());
+ }
if (first) {
net_live_track->progress();
}
return HAILO_SUCCESS;
}
-hailo_status NetworkRunner::run(Event &shutdown_event, LivePrinter &live_printer)
+hailo_status NetworkRunner::run(Event &shutdown_event, LivePrinter &live_printer, Barrier &barrier)
{
+ auto ang = std::unique_ptr<ActivatedNetworkGroup>(nullptr);
+ if (HAILO_SCHEDULING_ALGORITHM_NONE == m_params.scheduling_algorithm) {
+ auto ang_exp = m_cng->activate();
+ if (!ang_exp) {
+ barrier.terminate();
+ }
+ CHECK_EXPECTED_AS_STATUS(ang_exp);
+ ang = ang_exp.release();
+ }
+
+ auto net_live_track = std::make_shared<NetworkLiveTrack>(m_name, m_cng, m_overall_latency_meter);
+ live_printer.add(net_live_track, 1); //support progress over multiple outputs
+ barrier.arrive_and_wait();
+
std::vector<AsyncThreadPtr<hailo_status>> threads;
for (auto &input_vstream : m_input_vstreams) {
- threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>([this, &input_vstream](){
- return run_input_vstream(input_vstream);
+ BufferPtr dataset = nullptr;
+ for (auto ¶ms : m_params.vstream_params) {
+ if ((input_vstream.name() == params.name) && (!params.input_file_path.empty())) {
+ auto dataset_exp = create_dataset_from_input_file(params.input_file_path, input_vstream);
+ CHECK_EXPECTED_AS_STATUS(dataset_exp);
+ dataset = dataset_exp.release();
+ }
+ }
+ if (nullptr == dataset) {
+ auto dataset_exp = create_constant_dataset(input_vstream);
+ CHECK_EXPECTED_AS_STATUS(dataset_exp);
+ dataset = dataset_exp.release();
+ }
+
+ threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("SEND", [this, &input_vstream, &shutdown_event,
+ dataset](){
+ return run_input_vstream(input_vstream, shutdown_event, dataset, m_overall_latency_meter);
}));
}
- auto net_live_track = std::make_shared<NetworkLiveTrack>(m_name);
- live_printer.add(net_live_track);//support progress over multiple outputs
-
- bool first = true;//TODO: check with multiple outputs
+ bool first = true; //TODO: check with multiple outputs
for (auto &output_vstream : m_output_vstreams) {
- threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>([&output_vstream, first, net_live_track](){
- return run_output_vstream(output_vstream, first, net_live_track);
+ threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("RECV", [this, &output_vstream, first, net_live_track,
+ &shutdown_event](){
+ return run_output_vstream(output_vstream, first, net_live_track, shutdown_event, m_overall_latency_meter);
}));
first = false;
}
- //TODO: signal a barrier that we should start infer and timer. return threads and move stop outside?
+ //TODO: return threads and move stop outside?
CHECK_SUCCESS(shutdown_event.wait(HAILO_INFINITE_TIMEOUT));
stop();
return wait_for_threads(threads);
#ifndef _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_
#define _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_
+#include "common/barrier.hpp"
+
#include "hailo/vdevice.hpp"
#include "hailo/vstream.hpp"
#include "hailo/event.hpp"
#include "hailo/network_group.hpp"
#include "hailo/expected.hpp"
+#include "hailo/buffer.hpp"
+
+#include "../hailortcli.hpp"
#include "live_printer.hpp"
#include "network_live_track.hpp"
std::string name;
hailo_vstream_params_t params;
+ std::string input_file_path;
};
struct NetworkParams
{
+ NetworkParams();
+
std::string hef_path;
std::string net_group_name;
std::vector<VStreamParams> vstream_params;
+ hailo_scheduling_algorithm_t scheduling_algorithm;
// Network parameters
uint16_t batch_size;
uint32_t scheduler_threshold;
uint32_t scheduler_timeout_ms;
+ uint8_t scheduler_priority;
// Run parameters
uint32_t framerate;
+
+ bool measure_hw_latency;
+ bool measure_overall_latency;
};
class NetworkRunner
{
public:
NetworkRunner(const NetworkParams ¶ms, const std::string &name,
- std::vector<hailort::InputVStream> &&input_vstreams, std::vector<hailort::OutputVStream> &&output_vstreams);
+ std::vector<hailort::InputVStream> &&input_vstreams, std::vector<hailort::OutputVStream> &&output_vstreams,
+ std::shared_ptr<hailort::ConfiguredNetworkGroup> cng, hailort::LatencyMeterPtr overall_latency_meter);
static hailort::Expected<std::shared_ptr<NetworkRunner>> create_shared(hailort::VDevice &vdevice, const NetworkParams ¶ms);
- hailo_status run(hailort::Event &shutdown_event, LivePrinter &live_printer);
+ hailo_status run(hailort::Event &shutdown_event, LivePrinter &live_printer, hailort::Barrier &barrier);
void stop();
private:
static hailort::Expected<std::pair<std::vector<hailort::InputVStream>, std::vector<hailort::OutputVStream>>> create_vstreams(
hailort::ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> ¶ms);
- hailo_status run_input_vstream(hailort::InputVStream &vstream);
- static hailo_status run_output_vstream(hailort::OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track);
+ hailo_status run_input_vstream(hailort::InputVStream &vstream, hailort::Event &shutdown_event, hailort::BufferPtr dataset,
+ hailort::LatencyMeterPtr overall_latency_meter);
+ static hailo_status run_output_vstream(hailort::OutputVStream &vstream, bool first, std::shared_ptr<NetworkLiveTrack> net_live_track,
+ hailort::Event &shutdown_event, hailort::LatencyMeterPtr overall_latency_meter);
+static hailort::Expected<hailort::BufferPtr> create_constant_dataset(const hailort::InputVStream &input_vstream);
+static hailort::Expected<hailort::BufferPtr> create_dataset_from_input_file(const std::string &file_path, const hailort::InputVStream &input_vstream);
const NetworkParams &m_params;//TODO: copy instead of ref?
std::string m_name;
std::vector<hailort::InputVStream> m_input_vstreams;
std::vector<hailort::OutputVStream> m_output_vstreams;
+ std::shared_ptr<hailort::ConfiguredNetworkGroup> m_cng;
+ hailort::LatencyMeterPtr m_overall_latency_meter;
};
#endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_RUNNER_HPP_ */
\ No newline at end of file
#include "run2_command.hpp"
#include "live_printer.hpp"
#include "timer_live_track.hpp"
+#include "measurement_live_track.hpp"
#include "network_runner.hpp"
+#include "common/barrier.hpp"
#include "common/async_thread.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/hef.hpp"
add_option("name", m_params.name, "vStream name")
->check(VStreamNameValidator(hef_path_option, net_group_name_option));
+ add_option("--input-file", m_params.input_file_path,
+ "Input file path. If not given, random data will be used. File format should be raw binary data with size that is a factor of the input shape size")
+ ->default_val("");
+
auto format_opt_group = add_option_group("Format");
format_opt_group->add_option("--type", m_params.params.user_buffer_format.type, "Format type")
->transform(HailoCheckedTransformer<hailo_format_type_t>({
{ "nchw", HAILO_FORMAT_ORDER_NCHW },
{ "yuy2", HAILO_FORMAT_ORDER_YUY2 },
{ "nv12", HAILO_FORMAT_ORDER_NV12 },
- { "nv21", HAILO_FORMAT_ORDER_NV21 }
+ { "nv21", HAILO_FORMAT_ORDER_NV21 },
+ { "rgb4", HAILO_FORMAT_ORDER_RGB4 },
+ { "i420", HAILO_FORMAT_ORDER_I420 }
}))
->default_val("auto");
add_flag_callback(format_opt_group, "-q,--quantized,!--no-quantized", "Whether or not data is quantized",
[this](bool result){
m_params.params.user_buffer_format.flags = result ?
- m_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED :
+ static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
static_cast<hailo_format_flags_t>(m_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
->run_callback_for_default()
->default_val(true); // default_val() must be after run_callback_for_default()
net_params->add_option("--batch-size", m_params.batch_size, "Batch size")->default_val(HAILO_DEFAULT_BATCH_SIZE);
net_params->add_option("--scheduler-threshold", m_params.scheduler_threshold, "Scheduler threshold")->default_val(0);
net_params->add_option("--scheduler-timeout", m_params.scheduler_timeout_ms, "Scheduler timeout in milliseconds")->default_val(0);
+ net_params->add_option("--scheduler-priority", m_params.scheduler_priority, "Scheduler priority")->default_val(HAILO_SCHEDULER_PRIORITY_NORMAL);
auto run_params = add_option_group("Run Parameters");
run_params->add_option("--framerate", m_params.framerate, "Input vStreams framerate")->default_val(UNLIMITED_FRAMERATE);
+ // TODO: support multiple scheduling algorithms
+ m_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN;
+
add_vstream_app_subcom(hef_path_option, net_group_name_option);
}
const std::vector<NetworkParams>& get_network_params();
std::chrono::seconds get_time_to_run();
+ std::vector<hailo_device_id_t> get_dev_ids();
+ uint32_t get_device_count();
+ bool get_measure_power();
+ bool get_measure_current();
+ bool get_measure_temp();
+ bool get_multi_process_service();
+ const std::string &get_group_id();
+
+ void set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm);
+ void set_measure_latency();
private:
void add_net_app_subcom();
std::vector<NetworkParams> m_network_params;
uint32_t m_time_to_run;
+ std::vector<std::string> m_device_id;
+ uint32_t m_device_count;
+ bool m_multi_process_service;
+ std::string m_group_id;
+
+ bool m_measure_hw_latency;
+ bool m_measure_overall_latency;
+
+ bool m_measure_power;
+ bool m_measure_current;
+ bool m_measure_temp;
};
+
Run2::Run2() : CLI::App("Run networks (preview)", "run2")
{
add_net_app_subcom();
add_option("-t,--time-to-run", m_time_to_run, "Time to run (seconds)")
->default_val(DEFAULT_TIME_TO_RUN_SECONDS)
->check(CLI::PositiveNumber);
+
+ auto vdevice_options_group = add_option_group("VDevice Options");
+
+ auto dev_id_opt = vdevice_options_group->add_option("-s,--device-id", m_device_id,
+ "Device id, same as returned from `hailortcli scan` command. For multiple devices, use space as separator.");
+
+ vdevice_options_group->add_option("--device-count", m_device_count, "VDevice device count")
+ ->default_val(HAILO_DEFAULT_DEVICE_COUNT)
+ ->check(CLI::PositiveNumber)
+ ->excludes(dev_id_opt);
+
+ vdevice_options_group->add_flag("--multi-process-service", m_multi_process_service, "VDevice multi process service")
+ ->default_val(false);
+
+ vdevice_options_group->add_option("--group-id", m_group_id, "VDevice group id")
+ ->default_val(HAILO_DEFAULT_VDEVICE_GROUP_ID);
+
+ auto measurement_options_group = add_option_group("Measurement Options");
+
+ auto measure_power_opt = measurement_options_group->add_flag("--measure-power", m_measure_power, "Measure power consumption")
+ ->default_val(false);
+
+ measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt)
+ ->default_val(false);
+
+ measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency")
+ ->default_val(false);
+
+ measurement_options_group->add_flag("--measure-overall-latency", m_measure_overall_latency, "Measure overall latency measurement")
+ ->default_val(false);
+
+ measurement_options_group->add_flag("--measure-temp", m_measure_temp, "Measure chip temperature")
+ ->default_val(false);
}
void Run2::add_net_app_subcom()
return std::chrono::seconds(m_time_to_run);
}
+// Returns true when power-consumption sampling was requested (--measure-power flag).
+bool Run2::get_measure_power()
+{
+    return m_measure_power;
+}
+
+// Returns true when current sampling was requested (--measure-current flag,
+// mutually exclusive with --measure-power).
+bool Run2::get_measure_current()
+{
+    return m_measure_current;
+}
+
+// Returns true when chip-temperature sampling was requested (--measure-temp flag).
+bool Run2::get_measure_temp()
+{
+    return m_measure_temp;
+}
+
+// Converts the user-supplied "--device-id" strings into the fixed-size C structs
+// expected by hailo_vdevice_params_t. Ids longer than the struct field are truncated.
+std::vector<hailo_device_id_t> Run2::get_dev_ids()
+{
+    std::vector<hailo_device_id_t> res;
+    res.reserve(m_device_id.size());
+    for (const auto &id_str : m_device_id) {
+        // Value-initialization zeroes the whole struct, so after the size-1
+        // strncpy the id is always null-terminated (the old memset was redundant).
+        hailo_device_id_t id = {};
+        std::strncpy(id.id, id_str.c_str(), sizeof(id.id) - 1);
+        res.push_back(id);
+    }
+    return res;
+}
+
+// Returns the "--device-count" value (defaults to HAILO_DEFAULT_DEVICE_COUNT).
+uint32_t Run2::get_device_count()
+{
+    return m_device_count;
+}
+
+// Overrides the scheduling algorithm on every parsed network
+// (used e.g. to force HAILO_SCHEDULING_ALGORITHM_NONE for Ethernet inference).
+void Run2::set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm)
+{
+    for (auto &params : m_network_params) {
+        params.scheduling_algorithm = scheduling_algorithm;
+    }
+}
+
+// Propagates the app-level latency-measurement flags into every parsed network's params.
+void Run2::set_measure_latency()
+{
+    for (auto &params : m_network_params) {
+        params.measure_hw_latency = m_measure_hw_latency;
+        params.measure_overall_latency = m_measure_overall_latency;
+    }
+}
+
+// Returns true when the VDevice should run over the HailoRT multi-process service.
+bool Run2::get_multi_process_service()
+{
+    return m_multi_process_service;
+}
+
+// Returns the VDevice group id (defaults to HAILO_DEFAULT_VDEVICE_GROUP_ID).
+const std::string &Run2::get_group_id()
+{
+    return m_group_id;
+}
+
+
/** Run2Command */
Run2Command::Run2Command(CLI::App &parent_app) : Command(parent_app.add_subcommand(std::make_shared<Run2>()))
{
return last_error_status;
}
+// Returns true when 'ip' is a well-formed dotted-quad IPv4 address (e.g. "1.2.3.4").
+// Used below to decide whether a device id refers to an Ethernet device.
+bool is_valid_ip(const std::string &ip)
+{
+    int octets[4] = {};
+    char trailing = '\0';
+    // The extra %c must NOT match: it rejects inputs with trailing characters.
+    // (A plain "%d.%d.%d.%d" would also accept e.g. "1.2.3.4junk" or "1.2.3.4.5".)
+    return (4 == sscanf(ip.c_str(), "%d.%d.%d.%d%c", &octets[0], &octets[1], &octets[2], &octets[3], &trailing)) &&
+        IS_FIT_IN_UINT8(octets[0]) && IS_FIT_IN_UINT8(octets[1]) &&
+        IS_FIT_IN_UINT8(octets[2]) && IS_FIT_IN_UINT8(octets[3]);
+}
+
hailo_status Run2Command::execute()
{
Run2 *app = reinterpret_cast<Run2*>(m_app);
+ app->set_measure_latency();
+
if (0 == app->get_network_params().size()) {
LOGGER__ERROR("Nothing to run");
return HAILO_INVALID_OPERATION;
LOGGER__WARN("\"hailortcli run2\" is in preview. It is recommended to use \"hailortcli run\" command for a single network group");
}
- // TODO: support multi-device. maybe get all by default?
hailo_vdevice_params_t vdevice_params = {};
CHECK_SUCCESS(hailo_init_vdevice_params(&vdevice_params));
+ auto dev_ids = app->get_dev_ids();
+ if (!dev_ids.empty()) {
+ vdevice_params.device_count = static_cast<uint32_t>(dev_ids.size());
+ vdevice_params.device_ids = dev_ids.data();
+
+ // Disable scheduler for eth VDevice
+ if ((1 == dev_ids.size()) && (is_valid_ip(dev_ids[0].id))) {
+ vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
+ CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "On Ethernet inference only one model is allowed");
+ app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
+ }
+ } else {
+ vdevice_params.device_count = app->get_device_count();
+ }
+
+ vdevice_params.group_id = app->get_group_id().c_str();
+ vdevice_params.multi_process_service = app->get_multi_process_service();
+
auto vdevice = VDevice::create(vdevice_params);
CHECK_EXPECTED_AS_STATUS(vdevice);
for (auto &net_params : app->get_network_params()) {
auto net_runner = NetworkRunner::create_shared(*vdevice->get(), net_params);
CHECK_EXPECTED_AS_STATUS(net_runner);
+
net_runners.emplace_back(net_runner.release());
}
- LivePrinter live_printer(std::chrono::seconds(1));
- live_printer.add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()));
+ auto live_printer = std::make_unique<LivePrinter>(std::chrono::seconds(1));
+
+ live_printer->add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()), 0);
auto shutdown_event = Event::create(Event::State::not_signalled);
CHECK_EXPECTED_AS_STATUS(shutdown_event);
std::vector<AsyncThreadPtr<hailo_status>> threads;
+ Barrier barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling
for (auto &net_runner : net_runners) {
- threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>([&net_runner, &shutdown_event, &live_printer](){
- return net_runner->run(shutdown_event.value(), live_printer);
+ threads.emplace_back(std::make_unique<AsyncThread<hailo_status>>("NG_INFER", [&net_runner, &shutdown_event,
+ &live_printer, &barrier](){
+ return net_runner->run(shutdown_event.value(), *live_printer, barrier);
}));
}
+
+ auto physical_devices = vdevice.value()->get_physical_devices();
+ CHECK_EXPECTED_AS_STATUS(physical_devices);
+
+ for (auto &device : physical_devices.value()) {
+ auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), app->get_measure_power(),
+ app->get_measure_current(), app->get_measure_temp());
+ CHECK_EXPECTED_AS_STATUS(measurement_live_track);
+ live_printer->add(measurement_live_track.release(), 2);
+ }
+
// TODO: wait for all nets before starting timer. start() should update TimerLiveTrack to start. or maybe append here but first in vector...
- live_printer.start();
- std::this_thread::sleep_for(app->get_time_to_run());
+ barrier.arrive_and_wait();
+ CHECK_SUCCESS(live_printer->start());
+ auto status = shutdown_event->wait(app->get_time_to_run());
+ if (HAILO_TIMEOUT != status) {
+        // if shutdown_event is signaled, it's because one of the send/recv threads failed
+ LOGGER__ERROR("Encountered error during inference. See log for more information.");
+ }
+ live_printer.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted.
shutdown_event->signal();
return wait_for_threads(threads);
}
\ No newline at end of file
#include <sstream>
TimerLiveTrack::TimerLiveTrack(std::chrono::milliseconds duration) :
- m_duration(duration), m_start(std::chrono::steady_clock::now())
+ LivePrinter::Track(), m_duration(duration), m_start_time()
{
}
+// Marks the timer as running and records the reference point used by get_text()
+// for the elapsed/ETA computation. Until start() is called, get_text() emits nothing
+// (it returns 0 while m_started is false).
+hailo_status TimerLiveTrack::start()
+{
+    m_start_time = std::chrono::steady_clock::now();
+    m_started = true;
+
+    return HAILO_SUCCESS;
+}
+
uint32_t TimerLiveTrack::get_text(std::stringstream &ss)
{
+ if (!m_started) {
+ return 0;
+ }
static const uint32_t MAX_PROGRESS_BAR_WIDTH = 20;
- auto elapsed_time = std::chrono::steady_clock::now() - m_start;
+ auto elapsed_time = std::chrono::steady_clock::now() - m_start_time;
auto eta = std::chrono::seconds(std::max<int32_t>(0, static_cast<int32_t>(std::round(std::chrono::duration<double>(m_duration - elapsed_time).count())))); // std::chrono::round is from C++17
auto elapsed_percentage = std::min<uint32_t>(100, static_cast<uint32_t>(std::round(std::chrono::duration<double>(100 * elapsed_time / m_duration).count())));
auto progress_bar_width = std::max<uint32_t>(1, std::min<uint32_t>(MAX_PROGRESS_BAR_WIDTH,
public:
TimerLiveTrack(std::chrono::milliseconds duration);
virtual ~TimerLiveTrack() = default;
+ virtual hailo_status start() override;
virtual uint32_t get_text(std::stringstream &ss) override;
private:
std::chrono::milliseconds m_duration;
- std::chrono::time_point<std::chrono::steady_clock> m_start;
+ std::chrono::time_point<std::chrono::steady_clock> m_start_time;
};
#endif /* _HAILO_HAILORTCLI_RUN2_TIMER_LIVE_TRACK_HPP_ */
\ No newline at end of file
#include "hailortcli.hpp"
#include "inference_progress.hpp"
#include "infer_stats_printer.hpp"
-#include "temp_measurement.hpp"
#include "graph_printer.hpp"
#if defined(__GNUC__)
// TODO: Support on windows (HRT-5919)
#include "common/barrier.hpp"
#include "common/latency_meter.hpp"
#include "common/filesystem.hpp"
+#include "common/device_measurements.hpp"
+#include "hailo/hailort.h"
#include "hailo/network_group.hpp"
#include "hailo/hef.hpp"
#include "hailo/vstream.hpp"
#include "hailo/vdevice.hpp"
+#include "hailo/transform.hpp"
#include "spdlog/fmt/fmt.h"
They're useful for simple interprocess communication. */
#define USER_SIGNAL (SIGUSR1)
-constexpr size_t OVERALL_LATENCY_TIMESTAMPS_LIST_LENGTH (512);
constexpr uint32_t DEFAULT_TIME_TO_RUN_SECONDS = 5;
#ifndef HAILO_EMULATOR
constexpr std::chrono::milliseconds TIME_TO_WAIT_FOR_CONFIG(300);
"Collect runtime data to be used by the Profiler");
static const char *JSON_SUFFIX = ".json";
collect_runtime_data_subcommand->add_option("--output-path", params.runtime_data.runtime_data_output_path,
- fmt::format("Runtime data output file path\n'{}' will be replaced with the current running hef",
- RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
+ fmt::format("Runtime data output file path\n'{}' will be replaced with the current running hef", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER)
+ + "\nIn case of multiple-devices, <device-id>_ will be added as prefix to each file")
->default_val(fmt::format("runtime_data_{}.json", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
->check(FileSuffixValidator(JSON_SUFFIX));
collect_runtime_data_subcommand->add_option("--batch-to-measure", params.runtime_data.batch_to_measure_str,
params.time_to_run = DEFAULT_TIME_TO_RUN_SECONDS;
}
- PARSE_CHECK(((!params.runtime_data.collect_runtime_data) || (params.vdevice_params.device_count == 1)),
- "Passing runtime data is not supported for multiple devices");
-
PARSE_CHECK((!(params.runtime_data.collect_runtime_data && params.vdevice_params.multi_process_service)),
"Passing runtime data is not supported for multi process service");
auto first = true;
for (auto& recv_object : recv_objects) {
auto &frames_recieved = frames_recieved_per_output[output_index];
- results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
+ results.emplace_back(std::make_unique<AsyncThread<hailo_status>>("RECV",
[network_progress_bar, params, &recv_object, &output_buffers, first, &barrier, &overall_latency_meter,
&frames_recieved, batch_size]() {
auto res = recv_loop(params, recv_object.get(), network_progress_bar, barrier, overall_latency_meter,
++output_index;
}
for (auto &send_object : send_objects) {
- results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
+ results.emplace_back(std::make_unique<AsyncThread<hailo_status>>("SEND",
[params, &send_object, &input_dataset, &barrier, &overall_latency_meter, batch_size]() -> hailo_status {
auto res = send_loop(params, send_object.get(), input_dataset, barrier, overall_latency_meter, batch_size);
if (HAILO_SUCCESS != res) {
CHECK_AS_EXPECTED(contains(recv_objects_per_network_group[network_group_index], network_name_pair.first), HAILO_INTERNAL_FAILURE,
"Not all networks was parsed correctly.");
auto network_name = network_name_pair.first;
- networks_threads_status[network_group_index].emplace_back(std::make_unique<AsyncThread<hailo_status>>(
+ networks_threads_status[network_group_index].emplace_back(std::make_unique<AsyncThread<hailo_status>>(fmt::format("NG_INFER {}", network_group_index),
[network_group_index, &configured_net_groups, &input_datasets, &output_buffers, ¶ms, &send_objects_per_network_group,
&recv_objects_per_network_group, network_name, &progress_bar, &networks_results]() {
return run_streaming_impl(configured_net_groups[network_group_index], input_datasets[network_group_index],
}
bool should_measure_temp = params.measure_temp;
- TemperatureMeasurement temp_measure(device);
+ auto temp_measure = TemperatureMeasurement::create_shared(device);
+ CHECK_EXPECTED(temp_measure);
if (should_measure_temp) {
- auto status = temp_measure.start_measurement();
+ auto status = temp_measure.value()->start_measurement();
CHECK_SUCCESS_AS_EXPECTED(status, "Failed to get chip's temperature");
}
}
if (should_measure_temp) {
- temp_measure.stop_measurement();
- auto temp_measure_p = make_shared_nothrow<TempMeasurementData>(temp_measure.get_data());
+ temp_measure.value()->stop_measurement();
+ auto temp_measure_p = make_shared_nothrow<AccumulatorResults>(temp_measure.value()->get_data());
CHECK_NOT_NULL_AS_EXPECTED(temp_measure_p, HAILO_OUT_OF_HOST_MEMORY);
auto status = inference_result.set_temp_measurement(device.get_dev_id(), std::move(temp_measure_p));
CHECK_SUCCESS_AS_EXPECTED(status);
std::map<std::string, std::shared_ptr<TemperatureMeasurement>> temp_measurements;
if (params.measure_temp) {
for (auto &device : physical_devices) {
- auto temp_measure = make_shared_nothrow<TemperatureMeasurement>(device);
- CHECK_NOT_NULL_AS_EXPECTED(temp_measure, HAILO_OUT_OF_HOST_MEMORY);
- auto status = temp_measure->start_measurement();
+ auto temp_measure = TemperatureMeasurement::create_shared(device);
+ CHECK_EXPECTED(temp_measure);
+ auto status = temp_measure.value()->start_measurement();
CHECK_SUCCESS_AS_EXPECTED(status, "Failed starting temperature measurement on device {}", device.get().get_dev_id());
- temp_measurements.emplace(device.get().get_dev_id(), std::move(temp_measure));
+ temp_measurements.emplace(device.get().get_dev_id(), temp_measure.release());
}
}
if (params.measure_temp) {
for(const auto &temp_measure_pair : temp_measurements) {
temp_measure_pair.second->stop_measurement();
- auto temp_measure_p = make_shared_nothrow<TempMeasurementData>(temp_measure_pair.second->get_data());
+ auto temp_measure_p = make_shared_nothrow<AccumulatorResults>(temp_measure_pair.second->get_data());
CHECK_NOT_NULL_AS_EXPECTED(temp_measure_p, HAILO_OUT_OF_HOST_MEMORY);
auto status = inference_result.set_temp_measurement(temp_measure_pair.first, std::move(temp_measure_p));
CHECK_SUCCESS_AS_EXPECTED(status);
}
if (params.runtime_data.collect_runtime_data) {
- const auto runtime_data_output_path = format_runtime_data_output_path(
- params.runtime_data.runtime_data_output_path, params.hef_path);
+ auto output_path = (1 == physical_devices.size()) ? params.runtime_data.runtime_data_output_path :
+ (std::string(device.get().get_dev_id()) + "_" + params.runtime_data.runtime_data_output_path);
+ const auto runtime_data_output_path = format_runtime_data_output_path(output_path, params.hef_path);
DownloadActionListCommand::execute(device.get(), runtime_data_output_path, network_group_list.value(),
params.hef_path);
}
#include "hailortcli.hpp"
#include "common.hpp"
#include "power_measurement_command.hpp"
-#include "temp_measurement.hpp"
+#include "common/device_measurements.hpp"
#include "CLI/CLI.hpp"
#include "inference_result.hpp"
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file temp_measurement.cpp
- * @brief Measure temperature of Hailo chip
- **/
-
-#include "temp_measurement.hpp"
-
-constexpr std::chrono::milliseconds DEFAULT_TEMPERATURE_MEASUREMENTS_INTERVAL(1000);
-
-static float32_t calc_avg(uint32_t old_samples_count, float32_t old_avg, uint32_t new_samples_count, float32_t new_value)
-{
- float32_t old_samples = static_cast<float32_t>(old_samples_count);
- float32_t new_samples = static_cast<float32_t>(new_samples_count);
- float32_t total_samples_count = old_samples + new_samples;
- return (((old_avg * old_samples) + (new_value * new_samples)) / total_samples_count);
-}
-
-TemperatureMeasurement::TemperatureMeasurement(Device &device) :
- m_device(device),
- m_is_thread_running(false),
- m_data()
-{}
-
-TemperatureMeasurement::~TemperatureMeasurement()
-{
- stop_measurement();
-}
-
-hailo_status TemperatureMeasurement::start_measurement()
-{
- // Checking temperature sensor before starting thread
- auto temp_info = m_device.get_chip_temperature();
- CHECK_EXPECTED_AS_STATUS(temp_info);
-
- m_is_thread_running = true;
- m_thread = std::thread([this] () {
- while (m_is_thread_running.load()) {
- auto temp_info = m_device.get_chip_temperature();
- if (temp_info.status() != HAILO_SUCCESS) {
- LOGGER__ERROR("Failed to get chip's temperature, status = {}", temp_info.status());
- m_is_thread_running = false;
- break;
- }
-
- TempMeasurementData new_data = {};
- auto old_data = m_data;
-
- float32_t ts_avg = ((temp_info->ts0_temperature + temp_info->ts1_temperature) / 2);
- new_data.max_value = std::max(old_data.max_value, ts_avg);
- new_data.min_value = (old_data.min_value == 0) ? ts_avg : std::min(old_data.min_value, ts_avg);
- new_data.average_value = calc_avg(old_data.sample_count, old_data.average_value, temp_info->sample_count, ts_avg);
- new_data.sample_count = old_data.sample_count + temp_info->sample_count;
-
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_data = new_data;
- }
-
- std::this_thread::sleep_for(DEFAULT_TEMPERATURE_MEASUREMENTS_INTERVAL);
- }
- });
-
- return HAILO_SUCCESS;
-}
-
-void TemperatureMeasurement::stop_measurement()
-{
- m_is_thread_running = false;
-
- if (m_thread.joinable()) {
- m_thread.join();
- }
-}
-
-const TempMeasurementData TemperatureMeasurement::get_data()
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_data;
-}
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file temp_measurement.hpp
- * @brief Measure temperature of Hailo chip
- **/
-
-#ifndef _HAILO_TEMP_MEASUREMENT_HPP_
-#define _HAILO_TEMP_MEASUREMENT_HPP_
-
-#include "hailortcli.hpp"
-#include "command.hpp"
-#include "hailo/hailort.h"
-#include "hailo/device.hpp"
-#include "CLI/CLI.hpp"
-
-#include <thread>
-
-struct TempMeasurementData {
- float32_t average_value;
- float32_t min_value;
- float32_t max_value;
- uint32_t sample_count;
-};
-
-
-class TemperatureMeasurement final {
-public:
- TemperatureMeasurement(Device &device);
- virtual ~TemperatureMeasurement();
-
- hailo_status start_measurement();
- void stop_measurement();
- const TempMeasurementData get_data();
-
-private:
- void measure_temp();
-
- Device &m_device;
- std::thread m_thread;
- std::atomic_bool m_is_thread_running;
- std::mutex m_mutex;
- TempMeasurementData m_data;
-};
-
-#endif /* _HAILO_TEMP_MEASUREMENT_HPP_ */
# set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*")
set(HAILORT_MAJOR_VERSION 4)
-set(HAILORT_MINOR_VERSION 12)
-set(HAILORT_REVISION_VERSION 1)
+set(HAILORT_MINOR_VERSION 13)
+set(HAILORT_REVISION_VERSION 0)
# Add the cmake folder so the modules there are found
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
target_include_directories(readerwriterqueue INTERFACE ${HAILO_EXTERNAL_DIR}/readerwriterqueue)
add_subdirectory(src)
+set(NET_FLOW_INFRA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests/infra/net_flow")
if(HAILO_BUILD_EXAMPLES)
add_subdirectory(examples)
message(FATAL_ERROR "Only unix hosts are supported, stopping build")
endif()
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
# GST_PLUGIN_DEFINE needs PACKAGE to be defined
set(GST_HAILO_PACKAGE_NAME "hailo")
gst-hailo/metadata/tensor_meta.cpp
gst-hailo/hailo_events/hailo_events.cpp)
+set_property(TARGET gsthailo PROPERTY CXX_STANDARD 14)
+
set_target_properties(gsthailo PROPERTIES
PUBLIC_HEADER "gst-hailo/metadata/tensor_meta.hpp"
)
#define DEFAULT_VDEVICE_KEY (0)
#define MIN_VALID_VDEVICE_KEY (1)
-#define HAILO_SUPPORTED_FORMATS "{ RGB, RGBA, YUY2, NV12, NV21 }"
+#define HAILO_SUPPORTED_FORMATS "{ RGB, RGBA, YUY2, NV12, NV21, I420 }"
#define HAILO_VIDEO_CAPS GST_VIDEO_CAPS_MAKE(HAILO_SUPPORTED_FORMATS)
#define HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS (0)
#include <memory>
#include <mutex>
+#include <thread>
G_BEGIN_DECLS
return scheduling_algorithm_type;
}
+#define GST_TYPE_HAILO_FORMAT_TYPE (gst_hailo_format_type_get_type ())
+/* Lazily registers (on first call) and returns the GEnum type backing the
+ * "input-format-type"/"output-format-type" hailonet properties.
+ * NOTE(review): the first-call registration is not guarded (no
+ * g_once_init_enter) - presumably only reached from class_init on a single
+ * thread; confirm before calling from elsewhere. */
+static GType
+gst_hailo_format_type_get_type (void)
+{
+    static GType format_type_enum = 0;
+
+    /* Tightly coupled to hailo_format_type_t */
+
+    if (!format_type_enum) {
+        static GEnumValue format_types[] = {
+            { HAILO_FORMAT_TYPE_AUTO, "auto", "HAILO_FORMAT_TYPE_AUTO"},
+            { HAILO_FORMAT_TYPE_UINT8, "uint8", "HAILO_FORMAT_TYPE_UINT8"},
+            { HAILO_FORMAT_TYPE_UINT16, "uint16", "HAILO_FORMAT_TYPE_UINT16"},
+            { HAILO_FORMAT_TYPE_FLOAT32, "float32", "HAILO_FORMAT_TYPE_FLOAT32"},
+            { HAILO_FORMAT_TYPE_MAX_ENUM, NULL, NULL },
+        };
+
+        format_type_enum = g_enum_register_static ("GstHailoFormatTypes", format_types);
+    }
+
+    return format_type_enum;
+}
+
constexpr std::chrono::milliseconds WAIT_FOR_FLUSH_TIMEOUT_MS(1000);
static void gst_hailonet_set_property(GObject *object, guint property_id, const GValue *value, GParamSpec *pspec);
PROP_SCHEDULER_TIMEOUT_MS,
PROP_SCHEDULER_THRESHOLD,
PROP_MULTI_PROCESS_SERVICE,
+ PROP_INPUT_QUANTIZED,
+ PROP_OUTPUT_QUANTIZED,
+ PROP_INPUT_FORMAT_TYPE,
+ PROP_OUTPUT_FORMAT_TYPE,
};
G_DEFINE_TYPE(GstHailoNet, gst_hailonet, GST_TYPE_BIN);
g_param_spec_enum("scheduling-algorithm", "Scheduling policy for automatic network group switching", "Controls the Model Scheduler algorithm of HailoRT. "
"Gets values from the enum GstHailoSchedulingAlgorithms. "
"Using Model Scheduler algorithm different than HAILO_SCHEDULING_ALGORITHM_NONE, excludes the property 'is-active'. "
- "When using the same VDevice across multiple hailonets, all should have the same 'scheduling-algorithm'. "
- "To run with more than one device, set env variable 'HAILO_ENABLE_MULTI_DEVICE_SCHEDULER' to 1.",
+ "When using the same VDevice across multiple hailonets, all should have the same 'scheduling-algorithm'. ",
GST_TYPE_SCHEDULING_ALGORITHM, HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN,
(GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
g_object_class_install_property(gobject_class, PROP_SCHEDULER_TIMEOUT_MS,
g_param_spec_boolean("multi-process-service", "Should run over HailoRT service", "Controls wether to run HailoRT over its service. "
"To use this property, the service should be active and scheduling-algorithm should be set. Defaults to false.",
HAILO_DEFAULT_MULTI_PROCESS_SERVICE, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+ g_object_class_install_property(gobject_class, PROP_INPUT_QUANTIZED,
+ g_param_spec_boolean("input-quantized", "Is the input quantized or not", "Passing `true` under the argument means that the input data sent to the stream is quantized to begin with."
+ "This will result in an input stream that doesn't quantize the input data. Passing `false` under the argument, will lead to input data being quantized.",
+ true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+ g_object_class_install_property(gobject_class, PROP_OUTPUT_QUANTIZED,
+ g_param_spec_boolean("output-quantized", "Should the output be quantized or de-quantized","Passing `true` under the argument means that the output data received from the stream is to remain quantized"
+ "(such as it is upon exiting the device). This will result in an output stream that doesn't de-quantize the output data. Passing `false` under the argument will lead to output data being de-quantized.",
+ true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+ g_object_class_install_property(gobject_class, PROP_INPUT_FORMAT_TYPE,
+ g_param_spec_enum("input-format-type", "Input format type", "Input format type(auto, float32, uint16, uint8). Default value is auto."
+ "Gets values from the enum GstHailoFormatType. ",
+ GST_TYPE_HAILO_FORMAT_TYPE, HAILO_FORMAT_TYPE_AUTO,
+ (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+ g_object_class_install_property(gobject_class, PROP_OUTPUT_FORMAT_TYPE,
+ g_param_spec_enum("output-format-type", "Output format type", "Output format type(auto, float32, uint16, uint8). Default value is auto."
+ "Gets values from the enum GstHailoFormatType. ",
+ GST_TYPE_HAILO_FORMAT_TYPE, HAILO_FORMAT_TYPE_AUTO,
+ (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
// See information about the "flush" signal in the element description
g_signal_new(
"flush",
}
m_props.m_multi_process_service = g_value_get_boolean(value);
break;
+ case PROP_INPUT_QUANTIZED:
+ if (m_was_configured) {
+ g_warning("The network was already configured, so changing the quantized flag will not take effect!");
+ break;
+ }
+ m_props.m_input_quantized = g_value_get_boolean(value);
+ break;
+ case PROP_OUTPUT_QUANTIZED:
+ if (m_was_configured) {
+ g_warning("The network was already configured, so changing the quantized flag will not take effect!");
+ break;
+ }
+ m_props.m_output_quantized = g_value_get_boolean(value);
+ break;
+ case PROP_INPUT_FORMAT_TYPE:
+ if (m_was_configured) {
+ g_warning("The network was already configured, so changing the format type will not take effect!");
+ break;
+ }
+ m_props.m_input_format_type = static_cast<hailo_format_type_t>(g_value_get_enum(value));
+ break;
+ case PROP_OUTPUT_FORMAT_TYPE:
+ if (m_was_configured) {
+ g_warning("The network was already configured, so changing the format type will not take effect!");
+ break;
+ }
+ m_props.m_output_format_type = static_cast<hailo_format_type_t>(g_value_get_enum(value));
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
break;
case PROP_MULTI_PROCESS_SERVICE:
g_value_set_boolean(value, m_props.m_multi_process_service.get());
break;
+ case PROP_INPUT_QUANTIZED:
+ g_value_set_boolean(value, m_props.m_input_quantized.get());
+ break;
+ case PROP_OUTPUT_QUANTIZED:
+ g_value_set_boolean(value, m_props.m_output_quantized.get());
+ break;
+ case PROP_INPUT_FORMAT_TYPE:
+ g_value_set_enum(value, m_props.m_input_format_type.get());
+ break;
+ case PROP_OUTPUT_FORMAT_TYPE:
+ g_value_set_enum(value, m_props.m_output_format_type.get());
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
break;
GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler threshold failed, status = %d", status);
}
- auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats);
+ auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats, static_cast<bool>(m_props.m_input_quantized.get()),
+ static_cast<bool>(m_props.m_output_quantized.get()), m_props.m_input_format_type.get(), m_props.m_output_format_type.get());
GST_CHECK_EXPECTED_AS_STATUS(vstreams, m_element, RESOURCE, "Creating vstreams failed, status = %d", status);
GST_HAILOSEND(m_hailosend)->impl->set_input_vstreams(std::move(vstreams->first));
auto parsed_event = HailoSetOutputFormatEvent::parse(event);
if (HAILO_SUCCESS != parsed_event.status()) {
- return FALSE;
+ return FALSE;
}
m_output_formats = std::move(parsed_event->formats);
HailoNetProperties() : m_device_id(nullptr), m_hef_path(nullptr), m_network_name(nullptr), m_batch_size(HAILO_DEFAULT_BATCH_SIZE),
m_is_active(false), m_device_count(0), m_vdevice_key(DEFAULT_VDEVICE_KEY), m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD),
- m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE)
+ m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE), m_input_quantized(true), m_output_quantized(true), m_input_format_type(HAILO_FORMAT_TYPE_AUTO),
+ m_output_format_type(HAILO_FORMAT_TYPE_AUTO)
+
{}
HailoElemProperty<gchar*> m_device_id;
HailoElemProperty<guint32> m_scheduler_timeout_ms;
HailoElemProperty<guint32> m_scheduler_threshold;
HailoElemProperty<gboolean> m_multi_process_service;
+ HailoElemProperty<gboolean> m_input_quantized;
+ HailoElemProperty<gboolean> m_output_quantized;
+ HailoElemProperty<hailo_format_type_t> m_input_format_type;
+ HailoElemProperty<hailo_format_type_t> m_output_format_type;
};
class HailoNetImpl final
for (auto &out_vstream : m_output_vstreams) {
GstHailoBufferPool *hailo_pool = GST_HAILO_BUFFER_POOL(g_object_new(GST_TYPE_HAILO_BUFFER_POOL, NULL));
gst_object_ref_sink(hailo_pool);
- memcpy(hailo_pool->vstream_name, out_vstream.name().c_str(), sizeof(hailo_pool->vstream_name));
+ strncpy(hailo_pool->vstream_name, out_vstream.name().c_str(), sizeof(hailo_pool->vstream_name) - 1);
+ hailo_pool->vstream_name[sizeof(hailo_pool->vstream_name) - 1] = '\0';
hailo_pool->element_name = GST_ELEMENT_NAME(GST_ELEMENT_PARENT(m_element));
GstBufferPool *pool = GST_BUFFER_POOL(hailo_pool);
#define YUY2_FEATURES_SIZE (2)
#define NV12_FEATURES_SIZE (3)
#define NV21_FEATURES_SIZE (3)
+#define I420_FEATURES_SIZE (3)
static void gst_hailosend_set_property(GObject *object, guint property_id, const GValue *value, GParamSpec *pspec);
static void gst_hailosend_get_property(GObject *object, guint property_id, GValue *value, GParamSpec *pspec);
gst_pad_template_new("sink", GST_PAD_SINK, GST_PAD_ALWAYS, gst_caps_from_string(HAILO_VIDEO_CAPS)));
gst_element_class_set_static_metadata(GST_ELEMENT_CLASS(klass),
- "hailosend element", "Hailo/Filter/Video", "Send RGB/RGBA/YUY2 video to HailoRT", PLUGIN_AUTHOR);
+ "hailosend element", "Hailo/Filter/Video", "Send RGB/RGBA/YUY2/NV12/NV21/I420 video to HailoRT", PLUGIN_AUTHOR);
element_class->change_state = GST_DEBUG_FUNCPTR(gst_hailosend_change_state);
"Features of input vstream %s is not %d for NV21 format! (features=%d)", m_input_vstream_infos[0].name, NV21_FEATURES_SIZE,
m_input_vstream_infos[0].shape.features);
break;
+ case HAILO_FORMAT_ORDER_I420:
+ format = "I420";
+ GST_CHECK(I420_FEATURES_SIZE == m_input_vstream_infos[0].shape.features, NULL, m_element, STREAM,
+ "Features of input vstream %s is not %d for I420 format! (features=%d)", m_input_vstream_infos[0].name, I420_FEATURES_SIZE,
+ m_input_vstream_infos[0].shape.features);
+ break;
default:
GST_ELEMENT_ERROR(m_element, RESOURCE, FAILED,
("Input VStream %s has an unsupported format order! order = %d", m_input_vstream_infos[0].name, m_input_vstream_infos[0].format.order), (NULL));
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
+
+#include "hailo/hailort_common.hpp"
+
#include "network_group_handle.hpp"
#include <sstream>
#include <chrono>
+
std::unordered_set<std::shared_ptr<VDevice>> NetworkGroupHandle::m_vdevices;
NetworkGroupConfigManager NetworkGroupHandle::m_net_group_config_manager;
NetworkGroupActivationManager NetworkGroupHandle::m_net_group_activation_manager;
}
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkGroupHandle::create_vstreams(const char *network_name,
- hailo_scheduling_algorithm_t scheduling_algorithm, const std::vector<hailo_format_with_name_t> &output_formats)
+ hailo_scheduling_algorithm_t scheduling_algorithm, const std::vector<hailo_format_with_name_t> &output_formats, bool input_quantized,
+ bool output_quantized, hailo_format_type_t input_format_type, hailo_format_type_t output_format_type)
{
GST_CHECK(nullptr != network_name, make_unexpected(HAILO_INVALID_ARGUMENT), m_element, RESOURCE, "Got nullptr in network name!");
auto expected_input_vstream_infos = hef()->get_input_vstream_infos(network_name);
GST_CHECK_EXPECTED(expected_input_vstream_infos, m_element, RESOURCE, "Failed getting input vstream infos, status = %d",
expected_input_vstream_infos.status());
- auto expected_input_params_map = m_cng->make_input_vstream_params(true, HAILO_FORMAT_TYPE_AUTO, HAILO_DEFAULT_VSTREAM_TIMEOUT_MS,
+ auto expected_input_params_map = m_cng->make_input_vstream_params(input_quantized, input_format_type, HAILO_DEFAULT_VSTREAM_TIMEOUT_MS,
HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, m_network_name);
GST_CHECK_EXPECTED(expected_input_params_map, m_element, RESOURCE, "Failed making input vstream params, status = %d",
expected_input_params_map.status());
-
// In RGB formats, Gstreamer is padding each row to 4.
auto &&input_params_map = expected_input_params_map.release();
auto &&input_infos = expected_input_vstream_infos.release();
GST_CHECK(1 == input_vstreams->size(), make_unexpected(HAILO_INVALID_OPERATION), m_element, RESOURCE,
"hailosend element supports only HEFs with one input for now!");
- auto output_params_map = m_cng->make_output_vstream_params(true, HAILO_FORMAT_TYPE_AUTO, HAILO_DEFAULT_VSTREAM_TIMEOUT_MS,
+ auto output_params_map = m_cng->make_output_vstream_params(output_quantized, output_format_type, HAILO_DEFAULT_VSTREAM_TIMEOUT_MS,
HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, m_network_name);
GST_CHECK_EXPECTED(output_params_map, m_element, RESOURCE, "Failed making output vstream params, status = %d",
output_params_map.status());
Expected<NetworkGroupsParamsMap> NetworkGroupHandle::get_configure_params(Hef &hef, const VDevice &vdevice,
const char *net_group_name, uint16_t batch_size)
{
- auto stream_interface = vdevice.get_default_streams_interface();
- GST_CHECK_EXPECTED(stream_interface, m_element, RESOURCE,
- "Failed default stream interface configure params, status = %d", stream_interface.status());
-
- auto params = hef.create_configure_params(*stream_interface, net_group_name);
+ auto params = vdevice.create_configure_params(hef, net_group_name);
GST_CHECK_EXPECTED(params, m_element, RESOURCE, "Failed creating configure params, status = %d", params.status());
params->batch_size = batch_size;
bool multi_process_service, const char *hef_path);
hailo_status configure_network_group(const char *net_group_name, hailo_scheduling_algorithm_t scheduling_algorithm, uint16_t batch_size);
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> create_vstreams(const char *network_name,
- hailo_scheduling_algorithm_t scheduling_algorithm, const std::vector<hailo_format_with_name_t> &output_formats);
+ hailo_scheduling_algorithm_t scheduling_algorithm, const std::vector<hailo_format_with_name_t> &output_formats, bool input_quantized,
+ bool output_quantized, hailo_format_type_t input_format_type, hailo_format_type_t output_format_type);
hailo_status activate_network_group();
Expected<bool> remove_network_group();
+++ /dev/null
-from hailo_platform import (HEF, VDevice, ConfigureParams, InferVStreams, InputVStreamParams,
- OutputVStreamParams, FormatType)
-from hailo_platform.pyhailort.pyhailort import HailoStreamInterface
-import numpy as np
-import argparse
-
-def parse_args():
- parser = argparse.ArgumentParser(description='Streaming API example')
- parser.add_argument('hef_path', type=str, help='Path of the HEF to run')
- parser.add_argument('-n', '--num-frames', type=int, default=10, help='Number of frames to send')
- return parser.parse_args()
-
-def main():
- args = parse_args()
- with VDevice() as target:
- hef = HEF(args.hef_path)
- configure_params = ConfigureParams.create_from_hef(hef, interface=HailoStreamInterface.PCIe)
- network_groups = target.configure(hef, configure_params)
- network_group = network_groups[0]
- network_group_params = network_group.create_params()
- input_vstreams_params = InputVStreamParams.make(network_group, quantized=False, format_type=FormatType.FLOAT32)
- output_vstreams_params = OutputVStreamParams.make(network_group, quantized=True, format_type=FormatType.AUTO)
- with InferVStreams(network_group, input_vstreams_params, output_vstreams_params) as infer_pipeline:
- input_names_to_shape = {vstream_info.name: vstream_info.shape for vstream_info in hef.get_input_vstream_infos()}
- input_data = {name : 1 + np.ndarray([args.num_frames] + list(shape), dtype=np.float32) for name, shape in input_names_to_shape.items()}
- with network_group.activate(network_group_params):
- _ = infer_pipeline.infer(input_data)
- fps = args.num_frames / infer_pipeline.get_hw_time()
-
- print('Inference ran successfully')
- print(f'FPS: {fps}')
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
+++ /dev/null
-import argparse
-import time
-import numpy as np
-
-from multiprocessing import Process
-from hailo_platform import (HEF, VDevice, HailoStreamInterface, ConfigureParams, InputVStreamParams, InputVStreams,
- OutputVStreamParams, OutputVStreams)
-
-def send(configured_network, num_frames):
- vstreams_params = InputVStreamParams.make(configured_network)
- configured_network.wait_for_activation(1000)
- with InputVStreams(configured_network, vstreams_params) as vstreams:
- vstream_to_buffer = {vstream: np.ndarray([1] + list(vstream.shape), dtype=vstream.dtype) for vstream in vstreams}
- for _ in range(num_frames):
- for vstream, buff in vstream_to_buffer.items():
- vstream.send(buff)
- # Flushing is not mandatory here
- for vstream in vstreams:
- vstream.flush()
-
-def recv(configured_network, vstreams_params, num_frames):
- configured_network.wait_for_activation(1000)
- with OutputVStreams(configured_network, vstreams_params) as vstreams:
- for _ in range(num_frames):
- for vstream in vstreams:
- _ = vstream.recv()
-
-def recv_all(configured_network, num_frames):
- vstreams_params_groups = OutputVStreamParams.make_groups(configured_network)
- recv_procs = []
- for vstreams_params in vstreams_params_groups:
- proc = Process(target=recv, args=(configured_network, vstreams_params, num_frames))
- proc.start()
- recv_procs.append(proc)
-
- for proc in recv_procs:
- proc.join()
-
-def parse_args():
- parser = argparse.ArgumentParser(description='vStream API example')
- parser.add_argument('hef_path', type=str, help='Path of the HEF to run')
- parser.add_argument('-n', '--num-frames', type=int, default=1000, help='Number of frames to send')
- return parser.parse_args()
-
-def main():
- args = parse_args()
- hef = HEF(args.hef_path)
-
- with VDevice() as device:
- configure_params = ConfigureParams.create_from_hef(hef, interface=HailoStreamInterface.PCIe)
- network_group = device.configure(hef, configure_params)[0]
- network_group_params = network_group.create_params()
- send_process = Process(target=send, args=(network_group, args.num_frames))
- recv_process = Process(target=recv_all, args=(network_group, args.num_frames))
-
- time_before = time.time()
- recv_process.start()
- send_process.start()
- with network_group.activate(network_group_params):
- send_process.join()
- recv_process.join()
-
- fps = args.num_frames / (time.time() - time_before)
-
- print('Inference ran successfully')
- print(f'FPS: {fps}')
-
-if __name__ == '__main__':
- main()
InputVStreams, OutputVStreams,
InferVStreams, HailoStreamDirection, HailoFormatFlags, HailoCpuId, Device, VDevice,
DvmTypes, PowerMeasurementTypes, SamplingPeriod, AveragingFactor, MeasurementBufferIndex,
- HailoRTException, YOLOv5PostProcessingOp)
+ HailoRTException, YOLOv5PostProcessOp, HailoSchedulingAlgorithm)
def _verify_pyhailort_lib_exists():
python_version = "".join(str(i) for i in sys.version_info[:2])
'MipiIspImageInOrder', 'MipiIspImageOutDataType', 'join_drivers_path', 'IspLightFrequency', 'HailoPowerMode',
'Endianness', 'HailoStreamInterface', 'InputVStreamParams', 'OutputVStreamParams',
'InputVStreams', 'OutputVStreams', 'InferVStreams', 'HailoStreamDirection', 'HailoFormatFlags', 'HailoCpuId',
- 'Device', 'VDevice', 'HailoRTException', 'YOLOv5PostProcessingOp']
+ 'Device', 'VDevice', 'HailoRTException', 'YOLOv5PostProcessOp', 'HailoSchedulingAlgorithm']
import hailo_platform.pyhailort._pyhailort as _pyhailort
-
class ControlObjectException(Exception):
"""Raised on illegal ContolObject operation."""
pass
ignore_socket_errors (bool, optional): Ignore socket error (might be usefull for debugging).
"""
# In the C API we define the total amount of attempts, instead of the amount of retries.
+
+ # TODO: HRT-9987 - Add this deprecation warning
+ # default_logger().warning("UdpHcpControl is deprecated! Please Use Control object")
max_number_of_attempts = retries + 1
response_timeout_milliseconds = int(response_timeout_seconds * 1000)
if device is None:
def __init__(self, device=None, device_info=None):
"""Initializes a new HailoPcieController object."""
+ # TODO: HRT-9987 - Add this deprecation warning
+ # default_logger().warning("PcieHcpControl is deprecated! Please Use Control object")
if device_info is None:
device_info = InternalPcieDevice.scan_devices()[0]
class HailoHWObject(object):
- """Abstract Hailo hardware device representation."""
+ # TODO: HRT-9987 - Add (deprecated) to this doc
+ """Abstract Hailo hardware device representation"""
NAME = InferenceTargets.UNINITIALIZED
IS_HARDWARE = True
self._is_device_used = False
self._hef_loaded = False
+ # TODO: HRT-9987 - Add this deprecation warning
+ # self._logger.warning("HailoHWObject is deprecated! Please use VDevice/Device object.")
+
# TODO: HRT-6310 Remove this.
def __eq__(self, other):
return type(self).NAME == other
@property
def name(self):
"""str: The name of this target. Valid values are defined by :class:`~hailo_platform.pyhailort.hw_object.InferenceTargets`"""
- # self._logger.warning("HailoHWObject name property is deprecated! Please use VDevice object with device_id.")
+ # TODO: HRT-9987 - Add this deprecation warning
+ # self._logger.warning("HailoHWObject name property is deprecated! Please use VDevice/Device object with device_id.")
return type(self).NAME
@property
def is_hardware(self):
"""bool: Indicates this target runs on a physical hardware device."""
# TODO: SDK should implement in Target
- # self._logger.warning("HailoHWObject is_hardware property is deprecated! Please use VDevice object, or derive from it.")
+ # TODO: HRT-9987 - Add this deprecation warning
+ # self._logger.warning("HailoHWObject is_hardware property is deprecated! Please use VDevice/Device object, or derive from it.")
return type(self).IS_HARDWARE
@property
Returns:
list of str: Sorted list of the output layer names.
"""
+ # TODO: HRT-9987 - Add this deprecation warning
# self._logger.warning("HailoHWObject sorted_output_layer_names property is deprecated! Please use ConfiguredNetwork get_sorted_output_names.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to sorted_output_layer_names is only allowed when there is a single loaded network group")
@contextmanager
def use_device(self, *args, **kwargs):
- # self._logger.warning("HailoHWObject use_device context manager is deprecated! Please use VDevice object.")
- """A context manager that wraps the usage of the device (deprecated)."""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
+ # self._logger.warning("HailoHWObject use_device context manager is deprecated! Please use VDevice/Device object.")
+ """A context manager that wraps the usage of the device."""
self._is_device_used = True
yield
self._is_device_used = False
Returns:
dict: Keys are device output names and values are lists of layers' names.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_output_device_layer_to_original_layer_map function is deprecated!")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to layer names is only allowed when there is a single loaded network group")
Returns:
dict: Keys are the names of the layers and values are device outputs names.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_original_layer_to_device_layer_map function is deprecated!")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to layer names is only allowed when there is a single loaded network group")
@property
def device_input_layers(self):
"""Get a list of the names of the device's inputs."""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject device_input_layers function is deprecated! Please use ConfiguredNetwork object.")
return [layer.name for layer in self.get_input_stream_infos()]
@property
def device_output_layers(self):
"""Get a list of the names of the device's outputs."""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject device_output_layers function is deprecated! Please use ConfiguredNetwork object.")
return [layer.name for layer in self.get_output_stream_infos()]
def hef_loaded(self):
"""Return True if this object has loaded the model HEF to the hardware device."""
# TODO: SDK should implement in Target
- # self._logger.warning("HailoHWObject hef_loaded function is deprecated! Please use VDevice object, or derive from it.")
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
+ # self._logger.warning("HailoHWObject hef_loaded function is deprecated! Please use VDevice/Device object, or derive from it.")
return self._hef_loaded
def outputs_count(self):
"""Return the amount of output tensors that are returned from the hardware device for every
input image.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject outputs_count function is deprecated! Please use ConfiguredNetwork object.")
return len(self.get_output_vstream_infos())
def _clear_shapes(self):
# TODO: SDK should implement in Target
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject _clear_shapes function is deprecated! Please use ConfiguredNetwork object.")
self._hw_consts = None
Returns:
str: Model name.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject model_name property is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) == 1:
return self._loaded_network_groups[0].name
Returns:
Tuple of output shapes, sorted by the output names.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoHWObject get_output_shapes function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Calling get_output_shapes is only allowed when there is a single loaded network group")
class HailoChipObject(HailoHWObject):
+ # TODO: HRT-9987 - Add (deprecated) to this docs
"""Hailo hardware device representation"""
def __init__(self):
Returns:
dict of :obj:'numpy.dtype': where the key is model input_layer name, and the value is dtype as the device expect to get for this input.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_input_layers_dtype function is deprecated! Please use ConfiguredNetwork object.")
return {stream.name: HailoRTTransformUtils.get_dtype(stream.data_bytes) for stream in self.get_input_stream_infos()}
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input vstreams
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all output vstreams
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_output_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with all the information objects of all input and output vstreams
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_vstream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network vstream info is only allowed when there is a single loaded network group")
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with information objects
of all input low-level streams.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
:obj:`hailo_platform.pyhailort._pyhailort.VStreamInfo`: with information objects
of all output low-level streams.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_output_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
If there is exactly one configured network group, returns a list of
:obj:`hailo_platform.pyhailort._pyhailort.StreamInfo`: with all the information objects of all input and output streams
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_all_stream_infos function is deprecated! Please use ConfiguredNetwork object.")
if len(self._loaded_network_groups) != 1:
raise HailoHWObjectException("Access to network stream info is only allowed when there is a single loaded network group")
Returns:
Tuple of integers representing the input_shape.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_input_shape function is deprecated! Please use ConfiguredNetwork object.")
if name is None:
name = self.get_input_vstream_infos()[0].name
Returns:
int: The index of the layer name in the output list.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# self._logger.warning("HailoChipObject get_index_from_name function is deprecated! Please use ConfiguredNetwork object.")
try:
return self.sorted_output_layer_names.index(name)
class EthernetDevice(HailoChipObject):
- """Represents any Hailo hardware device that supports UDP control and dataflow."""
+ # TODO: HRT-9987 - Add (deprecated) to this docs
+ """Represents any Hailo hardware device that supports UDP control and dataflow"""
NAME = InferenceTargets.UDP_CONTROLLER
Returns:
list of str: IPs of scanned devices.
"""
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
# default_logger().warning("EthernetDevice scan_devices method is deprecated! Please use scan() of Device object.")
udp_scanner = HailoUdpScan()
return udp_scanner.scan_devices(interface_name, timeout_seconds=timeout_seconds)
@property
def remote_ip(self):
"""Return the IP of the remote device."""
- # self._logger.warning("EthernetDevice remote_ip method is deprecated! Please use Device object.")
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
+ # self._logger.warning("EthernetDevice remote_ip method is deprecated! Please use VDevice/Device object.")
return self._remote_ip
class PcieDevice(HailoChipObject):
- """Hailo PCIe production device representation."""
+ # TODO: HRT-9987 - Add (deprecated) to this docs
+ """Hailo PCIe production device representation"""
NAME = InferenceTargets.PCIE_CONTROLLER
:func:`PcieDevice.scan_devices` to get list of all available devices.
"""
super(PcieDevice, self).__init__()
- # self._logger.warning("PcieDevice deprecated! Please use VDevice object.")
+ # TODO: HRT-9987 - Add this deprecation warning
+ # self._logger.warning("PcieDevice is deprecated! Please use VDevice/Device object.")
gc.collect()
# PcieDevice __del__ function tries to release self._device.
Returns:
list of :obj:`hailo_platform.pyhailort.pyhailort.PcieDeviceInfo`
"""
- # default_logger().warning("PcieDevice scan_devices method is deprecated! Please use scan() of Device object.")
+ # TODO: HRT-9987 - Add this deprecation warning and (deprecated) to this docs
+ # default_logger().warning("PcieDevice scan_devices method is deprecated! Please use Device object.")
return InternalPcieDevice.scan_devices()
def _open_device(self, device_info):
MIN_UDP_PADDED_PAYLOAD_SIZE = HailoSocketDefs.MIN_UDP_PADDED_PAYLOAD_SIZE()
MAX_ALIGNED_UDP_PAYLOAD_SIZE_RTP = HailoSocketDefs.MAX_ALIGNED_UDP_PAYLOAD_SIZE_RTP()
+class HailoSchedulingAlgorithm(_pyhailort.SchedulingAlgorithm):
+ pass
class HailoRTException(Exception):
pass
def __exit__(self, exception_type, value, traceback):
if value is not None:
if exception_type is _pyhailort.HailoRTStatusException:
- self._raise_indicative_status_exception(int(value.args[0]))
+ self._raise_indicative_status_exception(value)
else:
raise
- def _raise_indicative_status_exception(self, error_code):
+ def _raise_indicative_status_exception(self, libhailort_exception):
+ error_code = int(libhailort_exception.args[0])
string_error_code = get_status_message(error_code)
if string_error_code == "HAILO_ETH_RECV_FAILURE":
- raise UdpRecvError("Failed to receive data")
+ raise UdpRecvError("Failed to receive data") from libhailort_exception
if string_error_code == "HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION":
- raise InvalidProtocolVersionException("HailoRT has failed because an invalid protocol version was received from device")
+ raise InvalidProtocolVersionException("HailoRT has failed because an invalid protocol version was received from device") from libhailort_exception
if string_error_code == "HAILO_FW_CONTROL_FAILURE":
- raise HailoRTFirmwareControlFailedException("libhailort control operation failed")
+ raise HailoRTFirmwareControlFailedException("libhailort control operation failed") from libhailort_exception
if string_error_code == "HAILO_UNSUPPORTED_OPCODE":
- raise HailoRTUnsupportedOpcodeException("HailoRT has failed because an unsupported opcode was sent to device")
+ raise HailoRTUnsupportedOpcodeException("HailoRT has failed because an unsupported opcode was sent to device") from libhailort_exception
if string_error_code == "HAILO_INVALID_FRAME":
- raise HailoRTInvalidFrameException("An invalid frame was received")
+ raise HailoRTInvalidFrameException("An invalid frame was received") from libhailort_exception
if string_error_code == "HAILO_TIMEOUT":
- raise HailoRTTimeout("Received a timeout - hailort has failed because a timeout had occurred")
+ raise HailoRTTimeout("Received a timeout - hailort has failed because a timeout had occurred") from libhailort_exception
if string_error_code == "HAILO_STREAM_ABORTED_BY_HW":
- raise HailoRTStreamAborted("Stream aborted due to an external event")
+ raise HailoRTStreamAborted("Stream aborted due to an external event") from libhailort_exception
if string_error_code == "HAILO_INVALID_OPERATION":
- raise HailoRTInvalidOperationException("Invalid operation. See hailort.log for more information")
+ raise HailoRTInvalidOperationException("Invalid operation. See hailort.log for more information") from libhailort_exception
if string_error_code == "HAILO_INVALID_ARGUMENT":
- raise HailoRTInvalidArgumentException("Invalid argument. See hailort.log for more information")
+ raise HailoRTInvalidArgumentException("Invalid argument. See hailort.log for more information") from libhailort_exception
if string_error_code == "HAILO_NOT_FOUND":
- raise HailoRTNotFoundException("Item not found. See hailort.log for more information")
+ raise HailoRTNotFoundException("Item not found. See hailort.log for more information") from libhailort_exception
if string_error_code == "HAILO_INVALID_HEF":
- raise HailoRTInvalidHEFException("Invalid HEF. See hailort.log for more information")
+ raise HailoRTInvalidHEFException("Invalid HEF. See hailort.log for more information") from libhailort_exception
if string_error_code == "HAILO_ETH_FAILURE":
- raise HailoRTEthException("Ethernet failure. See hailort.log for more information")
- if string_error_code == "HAILO_PCIE_DRIVER_FAIL":
- raise HailoRTPCIeDriverException("PCIe driver failure. run 'dmesg | grep hailo' for more information")
+ raise HailoRTEthException("Ethernet failure. See hailort.log for more information") from libhailort_exception
+ if string_error_code == "HAILO_DRIVER_FAIL":
+ raise HailoRTPCIeDriverException("PCIe driver failure. run 'dmesg | grep hailo' for more information") from libhailort_exception
if string_error_code == "HAILO_NETWORK_GROUP_NOT_ACTIVATED":
- raise HailoRTNetworkGroupNotActivatedException("Network group is not activated")
+ raise HailoRTNetworkGroupNotActivatedException("Network group is not activated") from libhailort_exception
else:
- raise HailoRTException("libhailort failed with error: {} ({})".format(error_code, string_error_code))
+ raise HailoRTException("libhailort failed with error: {} ({})".format(error_code, string_error_code)) from libhailort_exception
def get_status_message(status_code):
status_str = _pyhailort.get_status_message(status_code)
def __init__(self, configured_network, target, hef):
self._configured_network = configured_network
+ self._input_vstreams_holders = []
+ self._output_vstreams_holders = []
self._target = target
self._hef = hef
:class:`ActivatedNetworkContextManager`: Context manager that returns the activated
network group.
"""
+ # TODO: HRT-9988 - Add deprecation warning when changing to service by default
network_group_params = network_group_params or self.create_params()
with ExceptionWrapper():
with ExceptionWrapper():
return self._configured_network.get_udp_rates_dict(int(fps), int(max_supported_rate_bytes))
+ def _before_fork(self):
+ if self._configured_network is not None:
+ self._configured_network.before_fork()
+ for input_vstreams in self._input_vstreams_holders:
+ input_vstreams.before_fork()
+ for output_vstreams in self._output_vstreams_holders:
+ output_vstreams.before_fork()
+
+ def _after_fork_in_parent(self):
+ if self._configured_network is not None:
+ self._configured_network.after_fork_in_parent()
+ for input_vstreams in self._input_vstreams_holders:
+ input_vstreams.after_fork_in_parent()
+ for output_vstreams in self._output_vstreams_holders:
+ output_vstreams.after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ if self._configured_network is not None:
+ self._configured_network.after_fork_in_child()
+ for input_vstreams in self._input_vstreams_holders:
+ input_vstreams.after_fork_in_child()
+ for output_vstreams in self._output_vstreams_holders:
+ output_vstreams.after_fork_in_child()
+
def _create_input_vstreams(self, input_vstreams_params):
- return self._configured_network.InputVStreams(input_vstreams_params)
+ input_vstreams_holder = self._configured_network.InputVStreams(input_vstreams_params)
+ self._input_vstreams_holders.append(input_vstreams_holder)
+ return input_vstreams_holder
def _create_output_vstreams(self, output_vstreams_params):
- return self._configured_network.OutputVStreams(output_vstreams_params)
+ output_vstreams_holder = self._configured_network.OutputVStreams(output_vstreams_params)
+ self._output_vstreams_holders.append(output_vstreams_holder)
+ return output_vstreams_holder
def get_stream_names_from_vstream_name(self, vstream_name):
"""Get stream name from vstream name for a specific network group.
with ExceptionWrapper():
return self._hef.get_vstream_names_from_stream_name(stream_name, self.name)
+ def set_scheduler_timeout(self, timeout_ms, network_name=None):
+ """Sets the maximum time period that may pass before getting run time from the scheduler,
+ even without reaching the minimum required send requests (e.g. threshold - see set_scheduler_threshold()),
+ as long as at least one send request has been sent.
+ This time period is measured since the last time the scheduler gave this network group run time.
+
+ Args:
+ timeout_ms (int): Timeout in milliseconds.
+ """
+ name = network_name if network_name is not None else self.name
+ return self._configured_network.set_scheduler_timeout(timeout_ms, name)
+
+ def set_scheduler_threshold(self, threshold):
+ """Sets the minimum number of send requests required before the network is considered ready to get run time from the scheduler.
+        If at least one send request has been sent, but the threshold is not reached within a set time period (e.g. timeout - see set_scheduler_timeout()),
+ the scheduler will consider the network ready regardless.
+
+ Args:
+ threshold (int): Threshold in number of frames.
+ """
+ return self._configured_network.set_scheduler_threshold(threshold)
+
+ def set_scheduler_priority(self, priority):
+ """Sets the priority of the network.
+        When the model scheduler chooses the next network, networks with a higher priority are prioritized in the selection.
+        A bigger number represents a higher priority.
+
+ Args:
+ priority (int): Priority as a number between HAILO_SCHEDULER_PRIORITY_MIN - HAILO_SCHEDULER_PRIORITY_MAX.
+ """
+ return self._configured_network.set_scheduler_priority(priority)
+
class ActivatedNetworkContextManager(object):
"""A context manager that returns the activated network group upon enter."""
class InternalEthernetDevice(object):
def __init__(self, address, port, response_timeout_seconds=10, max_number_of_attempts=3):
- # default_logger().warning("InternalEthernetDevice is deprecated! Please use Device object.")
+ # TODO: HRT-9987 - Add this deprecation warning
+ # default_logger().warning("InternalEthernetDevice is deprecated! Please use VDevice object.")
self.device = None
self._address = address
self._port = port
def __init__(self, bus, device, func, domain=None):
super(PcieDeviceInfo, self).__init__()
- # default_logger().warning("PcieDeviceInfo is deprecated! Please use Device object with device_id.")
self.bus = bus
self.device = device
self.func = func
class InternalPcieDevice(object):
def __init__(self, device_info=None):
- # self._logger.warning("InternalPcieDevice deprecated! Please use Device object.")
self.device = None
if device_info is None:
device_info = InternalPcieDevice.scan_devices()[0]
SUPPORTED_PROTOCOL_VERSION = 2
SUPPORTED_FW_MAJOR = 4
-SUPPORTED_FW_MINOR = 12
-SUPPORTED_FW_REVISION = 1
+SUPPORTED_FW_MINOR = 13
+SUPPORTED_FW_REVISION = 0
MEGA_MULTIPLIER = 1000.0 * 1000.0
HAILO8_A0 = 0
HAILO8 = 1
HAILO8L = 2
- MERCURY_CA = 3
+ HAILO15 = 3
def __str__(self):
return self.name
if ((device_arch == DeviceArchitectureTypes.HAILO8) or
(device_arch == DeviceArchitectureTypes.HAILO8L)):
return 'hailo8'
- elif device_arch == DeviceArchitectureTypes.MERCURY_CA:
- return 'mercury'
+ elif device_arch == DeviceArchitectureTypes.HAILO15:
+ return 'hailo15'
else:
raise HailoRTException("Unsupported device architecture.")
def _is_feature_enabled(self, feature):
return (self.supported_features & feature) != 0
-
class Control:
"""The control object of this device, which implements the control API of the Hailo device.
Should be used only from Device.control"""
WORD_SIZE = 4
def __init__(self, device: '_pyhailort.Device'):
- self._device = device
+ self.__device = device
self._logger = default_logger()
# TODO: should remove?
self._identify_info = self.identify()
+ @property
+ def _device(self):
+ if not self.__device.is_valid():
+ raise HailoRTInvalidOperationException("The device in use has been released. "
+ "This can happen if 'device.release()' has been called, or one-liner usage of control 'Device().control.XX()'")
+ return self.__device
+
@property
def device_id(self):
"""Getter for the device_id.
Returns:
- str: A string ID of the device. BDF for PCIe devices, IP address for Ethernet devices, "Core" for core devices.
+ str: A string ID of the device. BDF for PCIe devices, IP address for Ethernet devices, "Integrated" for integrated nnc devices.
"""
return self._device.device_id
"""reloads the device firmware (soft reset)"""
with ExceptionWrapper():
return self._device.reset(_pyhailort.ResetDeviceMode.SOFT)
-
+
def forced_soft_reset(self):
"""reloads the device firmware (forced soft reset)"""
with ExceptionWrapper():
configure_params_by_name (dict, optional): Maps between each net_group_name to
configure_params. In case of a mismatch with net_groups_names, default params will
be used.
- """
+ """
with ExceptionWrapper():
return self._device.configure(hef._hef, configure_params_by_name)
measurement types, please look at
:class:`~hailo_platform.pyhailort.pyhailort.PowerMeasurementTypes`.
"""
- if ((self._identify_info.device_architecture != DeviceArchitectureTypes.HAILO8) and
- (self._identify_info.device_architecture != DeviceArchitectureTypes.HAILO8L)):
- raise HailoRTException("Invalid device architecture: {}".format(self._identify_info.device_architecture))
+
with ExceptionWrapper():
return self._device.power_measurement(dvm, measurement_type)
For all supported measurement types view
:class:`~hailo_platform.pyhailort.pyhailort.PowerMeasurementTypes`.
"""
- if ((self._identify_info.device_architecture != DeviceArchitectureTypes.HAILO8) and
- (self._identify_info.device_architecture != DeviceArchitectureTypes.HAILO8L)):
- raise HailoRTException("Invalid device architecture: {}".format(self._identify_info.device_architecture))
+
with ExceptionWrapper():
return self._device.get_power_measurement(buffer_index, should_clear)
def _examine_user_config(self):
with ExceptionWrapper():
return self._device.examine_user_config()
-
+
def read_user_config(self):
"""Read the user configuration section as binary data.
"""
with ExceptionWrapper():
return self._device.write_user_config(configuration)
-
+
def _erase_user_config(self):
with ExceptionWrapper():
return self._device.erase_user_config()
-
+
def read_board_config(self):
"""Read the board configuration section as binary data.
:class:`~hailo_platform.pyhailort.pyhailort.BoardInformation`
"""
with ExceptionWrapper():
- response = self._device.identify()
+ response = self._device.identify()
board_information = BoardInformation(response.protocol_version, response.fw_version.major,
response.fw_version.minor, response.fw_version.revision, response.logger_version,
response.board_name, response.is_release, response.extended_context_switch_buffer,
c_slave = self._create_c_i2c_slave(slave)
with ExceptionWrapper():
return self._device.i2c_write(c_slave, register_address, data, len(data))
-
+
def i2c_read(self, slave, register_address, data_length):
"""Read data from an I2C slave.
c_slave = self._create_c_i2c_slave(slave)
with ExceptionWrapper():
return self._device.i2c_read(c_slave, register_address, data_length)
-
+
def read_register(self, address):
"""Read the value of a register from a given address.
register_value = self.read_register(address)
register_value &= ~(1 << bit_index)
self.write_memory(address, struct.pack('!I', register_value))
-
+
def firmware_update(self, firmware_binary, should_reset=True):
"""Update firmware binary on the flash.
with ExceptionWrapper():
return self._device.sensor_store_config(section_index, reset_data_size, sensor_type, config_file_path,
config_height, config_width, config_fps, config_name)
-
+
def store_isp_config(self, reset_config_size, isp_static_config_file_path, isp_runtime_config_file_path,
config_height=0, config_width=0, config_fps=0, config_name=None):
"""Store sensor isp configuration to Hailo chip flash memory.
"""
with ExceptionWrapper():
return self._device.sensor_get_sections_info()
-
+
def sensor_set_generic_i2c_slave(self, slave_address, register_address_size, bus_index, should_hold_bus, endianness):
"""Set a generic I2C slave for sensor usage.
class Device:
- """ Hailo device object representation. """
+    """ Hailo device object representation (for inference, use VDevice). """
@classmethod
def scan(cls):
"""Getter for the device_id.
Returns:
- str: A string ID of the device. BDF for PCIe devices, IP address for Ethernet devices, "Core" for core devices.
+ str: A string ID of the device. BDF for PCIe devices, IP address for Ethernet devices, "Integrated" for integrated nnc devices.
"""
return self._device_id
configure_params_by_name (dict, optional): Maps between each net_group_name to configure_params. If not provided, default params will be applied
"""
if self._creation_pid != os.getpid():
- raise HailoRTException("VDevice can only be configured from the process it was created in.")
+ raise HailoRTException("Device can only be configured from the process it was created in.")
with ExceptionWrapper():
configured_apps = self._device.configure(hef._hef, configure_params_by_name)
configured_networks = [ConfiguredNetwork(configured_app, self, hef) for configured_app in configured_apps]
params (:obj:`hailo_platform.pyhailort.pyhailort.VDeviceParams`, optional): VDevice params, call
:func:`VDevice.create_params` to get default params. Excludes 'device_ids'.
device_ids (list of str, optional): devices ids to create VDevice from, call :func:`Device.scan` to get
- list of all available devices. Excludes 'params'.
+ list of all available devices. Excludes 'params'. Cannot be used together with device_id.
"""
gc.collect()
+ self._logger = default_logger()
# VDevice __del__ function tries to release self._vdevice.
# to avoid AttributeError if the __init__ func fails, we set it to None first.
self._creation_pid = os.getpid()
self._device_ids = device_ids
- if self._device_ids is not None:
- if self._params is not None:
- raise HailoRTException("VDevice can be created from params or device ids. Both parameters were passed to the c'tor")
self._open_vdevice()
+ def _before_fork(self):
+ if self._vdevice is not None:
+ self._vdevice.before_fork()
+ for configured_network in self._loaded_network_groups:
+ configured_network._before_fork()
+
+ def _after_fork_in_parent(self):
+ if self._vdevice is not None:
+ self._vdevice.after_fork_in_parent()
+ for configured_network in self._loaded_network_groups:
+ configured_network._after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ if self._vdevice is not None:
+ self._vdevice.after_fork_in_child()
+ for configured_network in self._loaded_network_groups:
+ configured_network._after_fork_in_child()
+
def _open_vdevice(self):
- if self._device_ids is not None:
- with ExceptionWrapper():
- self._vdevice = _pyhailort.VDevice.create_from_ids(self._device_ids)
- else:
- if self._params is None:
- self._params = VDevice.create_params()
- with ExceptionWrapper():
- self._vdevice = _pyhailort.VDevice.create(self._params)
+ if self._params is None:
+ self._params = VDevice.create_params()
+ if sys.platform != "win32" and self._params.multi_process_service:
+ os.register_at_fork(before=lambda: self._before_fork())
+ os.register_at_fork(after_in_parent=lambda: self._after_fork_in_parent())
+ os.register_at_fork(after_in_child=lambda: self._after_fork_in_child())
+ with ExceptionWrapper():
+ device_ids = [] if self._device_ids is None else self._device_ids
+ self._vdevice = _pyhailort.VDevice.create(self._params, device_ids)
def __enter__(self):
return self
@staticmethod
def make(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the
- data that will be fed into the network group.
+ data that will be returned from the network group.
Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which
@staticmethod
def make_from_network_group(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the
- data that will be fed into the network group.
+ data that will be returned from the network group.
Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created.
- quantized (bool): Whether the data fed into the chip is already quantized. True means
+ quantized (bool): Whether the data returned from the chip is already quantized. True means
the data is already quantized. False means it's HailoRT's responsibility to quantize
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
@staticmethod
def make_groups(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None):
"""Create output virtual stream params from a configured network group. These params determine the format of the
- data that will be fed into the network group. The params groups are splitted with respect to their underlying streams for multi process usges.
+           data that will be returned from the network group. The params groups are split with respect to their underlying streams for multi-process usages.
Args:
configured_network (:class:`ConfiguredNetwork`): The configured network group for which
the params are created.
- quantized (bool): Whether the data fed into the chip is already quantized. True means
+ quantized (bool): Whether the data returned from the chip is already quantized. True means
the data is already quantized. False means it's HailoRT's responsibility to quantize
(scale) the data. Defaults to True.
format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
with ExceptionWrapper():
return self._send_object.info
+ def _before_fork(self):
+ if self._send_object is not None:
+ self._send_object.before_fork()
+
+ def _after_fork_in_parent(self):
+ if self._send_object is not None:
+ self._send_object.after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ if self._send_object is not None:
+ self._send_object.after_fork_in_child()
+
+
class InputVStreams(object):
"""Input vstreams pipelines that allows to send data, to be used as a context manager."""
def __iter__(self):
return iter(self._vstreams.values())
+ def _before_fork(self):
+ for vstream in self._vstreams.values():
+ vstream._before_fork()
+
+ def _after_fork_in_parent(self):
+ for vstream in self._vstreams.values():
+ vstream._after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ for vstream in self._vstreams.values():
+ vstream._after_fork_in_child()
+
+
class OutputLayerUtils(object):
def __init__(self, hef, vstream_name, pipeline, net_group_name=""):
self._hef = hef
with ExceptionWrapper():
return self._recv_object.info
+ def _before_fork(self):
+ if self._recv_object is not None:
+ self._recv_object.before_fork()
+
+ def _after_fork_in_parent(self):
+ if self._recv_object is not None:
+ self._recv_object.after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ if self._recv_object is not None:
+ self._recv_object.after_fork_in_child()
+
+
class OutputVStreams(object):
"""Output virtual streams pipelines that allows to receive data, to be used as a context manager."""
def __iter__(self):
return iter(self._vstreams.values())
-class YOLOv5PostProcessingOp(object):
+ def _before_fork(self):
+ for vstream in self._vstreams.values():
+ vstream._before_fork()
+
+ def _after_fork_in_parent(self):
+ for vstream in self._vstreams.values():
+ vstream._after_fork_in_parent()
+
+ def _after_fork_in_child(self):
+ for vstream in self._vstreams.values():
+ vstream._after_fork_in_child()
+
+
+class YOLOv5PostProcessOp(object):
def __init__(self, anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold, iou_threshold, num_of_classes,
- should_dequantize, max_boxes, should_sigmoid, one_class_per_bbox=True):
+ max_boxes, cross_classes=True):
- self._op = _pyhailort.YOLOv5PostProcessingOp.create(anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold,
- iou_threshold, num_of_classes, should_dequantize, max_boxes, should_sigmoid, one_class_per_bbox)
+ self._op = _pyhailort.YOLOv5PostProcessOp.create(anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold,
+ iou_threshold, num_of_classes, max_boxes, cross_classes)
def execute(self, net_flow_tensors):
return self._op.execute(net_flow_tensors)
\ No newline at end of file
import os
-import subprocess
import pathlib
+import subprocess
import sys
+import pkg_resources
+
import hailo_platform
from hailo_platform.tools.hailocli.base_utils import HailortCliUtil
-import pkg_resources
"""
HailoRTCLI matching commands in Hailo-CLI tool.
"""
+
class BenchmarkCommandCLI(HailortCliUtil):
def __init__(self, parser):
super().__init__(parser, 'benchmark')
-
+
class FWConfigCommandCLI(HailortCliUtil):
"""CLI tool for changing the FW configuration (User Config)"""
+
def __init__(self, parser):
super().__init__(parser, 'fw-config')
class BoardConfigCommandCLI(HailortCliUtil):
"""CLI tool for changing the FW configuration (Board Config)"""
+
def __init__(self, parser):
super().__init__(parser, 'board-config')
def __init__(self, parser):
super().__init__(parser, 'measure-power')
-
+
class RunCommandCLI(HailortCliUtil):
def __init__(self, parser):
super().__init__(parser, 'run')
class SensorConfigCommandCLI(HailortCliUtil):
def __init__(self, parser):
super().__init__(parser, 'sensor-config')
-
-
+
+
class FWUpdaterCLI(HailortCliUtil):
"""Cli tool for firmware updates"""
+
def __init__(self, parser):
super().__init__(parser, 'fw-update')
-
+
class SSBUpdaterCLI(HailortCliUtil):
"""Cli tool for second stage boot updates"""
+
def __init__(self, parser):
super().__init__(parser, 'ssb-update')
-
+
class UDPRateLimiterCLI(HailortCliUtil):
"""CLI tool for UDP rate limitation."""
- def __init__(self, parser):
- super().__init__(parser, 'udp-rate-limiter')
-
-class VersionCLI(HailortCliUtil):
- """CLI tool for hailort version."""
def __init__(self, parser):
- super().__init__(parser, '--version')
+ super().__init__(parser, 'udp-rate-limiter')
class TutorialRequired(Exception):
class TutorialRunnerCLI():
-
TUTORIALS_DIR = os.path.join(pathlib.Path(hailo_platform.__file__).parent.parent, 'hailo_tutorials/notebooks/')
TUTORIALS_REQUIREMENTS = ["jupyter"]
ERROR_MSG = """
working_set.require(req)
except pkg_resources.DistributionNotFound:
missing_pkgs.append(req)
-
+
if missing_pkgs:
sys.tracebacklimit = 0
raise TutorialRequired(f"\n{self.ERROR_MSG}\n {'; '.join([f'pip install {pkg}' for pkg in missing_pkgs])}")
#!/usr/bin/env python
import argparse
-import argcomplete
import sys
+import argcomplete
+
import hailo_platform
-from hailo_platform.tools.hailocli.base_utils import HailortCliUtil, Helper, HailortCliUtilError
-from hailo_platform.tools.hailocli.hailocli_commands import (FWUpdaterCLI, SSBUpdaterCLI, ControlCommandCLI, ScanCommandCLI,
- LoggerCommandCLI, MeasurePowerCommandCLI, RunCommandCLI, SensorConfigCommandCLI,
- FWConfigCommandCLI, BenchmarkCommandCLI, UDPRateLimiterCLI, MonitorCommandCLI, ParseHEFCommandCLI, TutorialRunnerCLI)
+from hailo_platform.tools.hailocli.base_utils import HailortCliUtil, HailortCliUtilError, Helper
+from hailo_platform.tools.hailocli.hailocli_commands import (BenchmarkCommandCLI, ControlCommandCLI, FWConfigCommandCLI,
+ FWUpdaterCLI, LoggerCommandCLI, MeasurePowerCommandCLI,
+ MonitorCommandCLI, ParseHEFCommandCLI, RunCommandCLI,
+ SSBUpdaterCLI, ScanCommandCLI, SensorConfigCommandCLI,
+ TutorialRunnerCLI, UDPRateLimiterCLI)
+from hailo_platform.tools.hailocli.version_action import CustomVersionAction
+
# Note: PlatformCommands are external dependencies in phase2-sdk/demos repo; don't change!
class PlatformCommands:
'sensor-config': ('Sensor configuration tool', SensorConfigCommandCLI),
'run': ('Run a compiled network', RunCommandCLI),
'benchmark': ('Measure basic performance on compiled network', BenchmarkCommandCLI),
- 'monitor': ("Monitor of networks - Presents information about the running networks. To enable monitor, set in the application process the environment variable 'SCHEDULER_MONITOR' to 1.", MonitorCommandCLI),
+ 'monitor': ("Monitor of networks - Presents information about the running networks. To enable monitor, set in the application process the environment variable 'HAILO_MONITOR' to 1.", MonitorCommandCLI),
'parse-hef': (' Parse HEF to get information about its components', ParseHEFCommandCLI),
'measure-power': ('Measures power consumption', MeasurePowerCommandCLI),
'tutorial': ('Runs the tutorials in jupyter notebook', TutorialRunnerCLI),
- #'--version': ('Print program version and exit', VersionCLI)
}
def __init__(self):
self.parser = argparse.ArgumentParser(description=self._get_generic_description())
+ self.parser.register('action', 'custom_version', CustomVersionAction)
+ self.parser.add_argument('--version', action='custom_version')
self.subparsers = self.parser.add_subparsers(help='Hailo utilities aimed to help with everything you need')
self.COMMANDS = {}
self.COMMANDS.update(type(self).PLATFORM_COMMANDS)
# Dependency injection for testing
def _run(self, argv):
self.COMMANDS['help'] = ('show the list of commands', Helper(self.COMMANDS))
+
# Create the commands and let them set the arguments
commands = {}
for command_name, (help_, command_class) in self.COMMANDS.items():
command_name = argv[0]
if (command_name in commands) and isinstance(commands[command_name], HailortCliUtil):
# HailortCliUtil just passes the rest of the argv to hailortcli
- try :
+ try:
return commands[command_name].run(argv[1:])
except HailortCliUtilError as e:
- print('\n'+ str(e))
+ print('\n' + str(e))
return
# This isn't a HailortCliUtil commnad, parse with argparse
--- /dev/null
+import argparse
+
+import hailo_platform
+
+
+class CustomVersionAction(argparse.Action):
+ def __init__(self,
+ option_strings,
+ dest=argparse.SUPPRESS,
+ default=argparse.SUPPRESS,
+ help="show program's version number and exit"):
+ super(CustomVersionAction, self).__init__(
+ option_strings=option_strings,
+ dest=dest,
+ default=default,
+ nargs=0,
+ help=help)
+
+ @staticmethod
+ def _print_version():
+ print(f'HailoRT v{hailo_platform.__version__}')
+
+ try:
+ import hailo_sdk_client
+ print(f'Hailo Dataflow Compiler v{hailo_sdk_client.__version__}')
+ except ImportError:
+ pass
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ self._print_version()
+ parser.exit()
class RateLimiterWrapper(object):
"""UDPRateLimiter wrapper enabling ``with`` statements."""
- def __init__(self, network_group, fps=1, fps_factor=1.0):
+ def __init__(self, network_group, fps=1, fps_factor=1.0, remote_ip=None, hw_arch=None):
"""RateLimiterWrapper constructor.
Args:
if not isinstance(network_group, ConfiguredNetwork):
return RateLimiterException("The API was changed. RateLimiterWrapper accept ConfiguredNetwork instead of ActivatedNetwork")
self._network_group = network_group
- self._remote_ip = network_group._target.remote_ip
+ if remote_ip is not None:
+ self._remote_ip = remote_ip
+ else:
+            # TODO: remove this fallback; the remote_ip parameter will be removed from this object
+ self._remote_ip = network_group._target.device_id
self._fps = fps
self._fps_factor = fps_factor
- self._hw_arch = network_group._target._hw_arch
+ if hw_arch is not None:
+ self._hw_arch = hw_arch
+ else:
+            # TODO: remove this fallback; the hw_arch parameter will be removed from this object
+ self._hw_arch = network_group._target._hw_arch if hasattr(network_group._target, '_hw_arch') else None
self._rates_dict = {}
self._tc_dict = {}
"source": [
"import numpy as np\n",
"from multiprocessing import Process\n",
- "from hailo_platform import (HEF, PcieDevice, HailoStreamInterface, InferVStreams, ConfigureParams,\n",
+ "from hailo_platform import (HEF, VDevice, HailoStreamInterface, InferVStreams, ConfigureParams,\n",
" InputVStreamParams, OutputVStreamParams, InputVStreams, OutputVStreams, FormatType)\n",
"\n",
"# The target can be used as a context manager (\"with\" statement) to ensure it's released on time.\n",
"# Here it's avoided for the sake of simplicity\n",
- "target = PcieDevice()\n",
+ "target = VDevice()\n",
"\n",
"# Loading compiled HEFs to device:\n",
"model_name = 'resnet_v1_18'\n",
"linux_aarch64",
],
url="https://hailo.ai/",
- version="4.12.1",
+ version="4.13.0",
zip_safe=False,
)
hef_api.cpp
vstream_api.cpp
quantization_api.cpp
+ ${HAILORT_OPS_CPP_SOURCES}
${HAILORT_COMMON_CPP_SOURCES}
)
if(WIN32)
target_link_libraries(_pyhailort PRIVATE Ws2_32 Iphlpapi Shlwapi)
endif()
+if(HAILO_BUILD_SERVICE)
+ target_link_libraries(_pyhailort PRIVATE grpc++_unsecure hailort_rpc_grpc_proto hef_proto)
+endif()
+
target_compile_options(_pyhailort PRIVATE ${HAILORT_COMPILE_OPTIONS})
exclude_archive_libs_symbols(_pyhailort)
#include "hailo/hailort.h"
#include "hailo/hailort_common.hpp"
#include "hailo/network_group.hpp"
-#include "utils.hpp"
+
#include "common/logger_macros.hpp"
+#include "utils.hpp"
+
+#include <pybind11/numpy.h>
+
+
namespace hailort
{
class HailoRTBindingsCommon
py::bytes DeviceWrapper::read_memory(uint32_t address, uint32_t length)
{
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(length, '\x00');
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
MemoryView data_view(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(response->data())), length);
auto status = device().read_memory(address, data_view);
void DeviceWrapper::i2c_write(hailo_i2c_slave_config_t *slave_config, uint32_t register_address, py::bytes data,
uint32_t length)
{
- VALIDATE_NOT_NULL(slave_config);
+ VALIDATE_NOT_NULL(slave_config, HAILO_INVALID_ARGUMENT);
std::string data_str(data);
MemoryView data_view = MemoryView::create_const(data_str.c_str(), length);
py::bytes DeviceWrapper::i2c_read(hailo_i2c_slave_config_t *slave_config, uint32_t register_address, uint32_t length)
{
- VALIDATE_NOT_NULL(slave_config);
+ VALIDATE_NOT_NULL(slave_config, HAILO_INVALID_ARGUMENT);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(length, '\x00');
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
MemoryView data_view(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(response->data())), length);
auto status = device().i2c_read(*slave_config, register_address, data_view);
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(config_buffer->data())), config_buffer->size());
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
return *response;
}
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(config_buffer->data())), config_buffer->size());
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
return *response;
}
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(
const_cast<char*>(reinterpret_cast<const char*>(buffer->data())), buffer->size());
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
return *response;
}
{
const auto buffer_str = static_cast<std::string>(buffer);
hailo_status status = device().direct_write_memory(address, buffer_str.c_str(),
- (uint32_t) (buffer_str.length()));
+ (uint32_t)(buffer_str.length()));
VALIDATE_STATUS(status);
}
void DeviceWrapper::add_to_python_module(py::module &m)
{
py::class_<DeviceWrapper>(m, "Device")
+ .def("is_valid", &DeviceWrapper::is_valid)
+
// Scan
.def("scan", &DeviceWrapper::scan)
#ifndef _DEVICE_API_HPP_
#define _DEVICE_API_HPP_
-#include "utils.hpp"
-#include "hailo/hailort.hpp"
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+
#include "common/socket.hpp"
+
+#include "utils.hpp"
#include "hef_api.hpp"
#include <pybind11/pybind11.h>
-#include <pybind11/pybind11.h>
namespace hailort
Device& device()
{
- VALIDATE_NOT_NULL(m_device);
+ VALIDATE_NOT_NULL(m_device, HAILO_INTERNAL_FAILURE);
return *(m_device.get());
}
const Device& device() const
{
- VALIDATE_NOT_NULL(m_device);
+ VALIDATE_NOT_NULL(m_device, HAILO_INTERNAL_FAILURE);
return *(m_device.get());
}
+ bool is_valid()
+ {
+ return (nullptr != m_device);
+ }
+
Device& operator*() // Used for control_internals
{
return device();
const ActivatedNetworkGroup& ActivatedAppContextManagerWrapper::enter()
{
auto activated = m_net_group.activate(m_network_group_params);
- VALIDATE_EXPECTED(activated);
-
- m_activated_net_group = activated.release();
+ if (activated.status() != HAILO_NOT_IMPLEMENTED) {
+ VALIDATE_EXPECTED(activated);
+ m_activated_net_group = activated.release();
+ }
return std::ref(*m_activated_net_group);
}
.def("wait_for_activation", [](ConfiguredNetworkGroup& self, uint32_t timeout_ms)
{
auto status = self.wait_for_activation(std::chrono::milliseconds(timeout_ms));
- VALIDATE_STATUS(status);
+ if (status != HAILO_NOT_IMPLEMENTED) {
+ VALIDATE_STATUS(status);
+ }
})
.def("InputVStreams", [](ConfiguredNetworkGroup &self, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
{
return py::cast(results.value());
})
+ .def("before_fork", [](ConfiguredNetworkGroup& self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.before_fork();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ })
+ .def("after_fork_in_parent", [](ConfiguredNetworkGroup& self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_parent();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ })
+ .def("after_fork_in_child", [](ConfiguredNetworkGroup& self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_child();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ })
+ .def("set_scheduler_timeout", [](ConfiguredNetworkGroup& self, int timeout, const std::string &network_name="")
+ {
+ auto timeout_mili = std::chrono::milliseconds(timeout);
+ auto status = self.set_scheduler_timeout(timeout_mili, network_name);
+ VALIDATE_STATUS(status);
+ })
+ .def("set_scheduler_threshold", [](ConfiguredNetworkGroup& self, uint32_t threshold)
+ {
+ auto status = self.set_scheduler_threshold(threshold);
+ VALIDATE_STATUS(status);
+ })
+ .def("set_scheduler_priority", [](ConfiguredNetworkGroup& self, uint8_t priority)
+ {
+ auto status = self.set_scheduler_priority(priority);
+ VALIDATE_STATUS(status);
+ })
;
ActivatedAppContextManagerWrapper::add_to_python_module(m);
-cmake_minimum_required(VERSION 3.0.0)
+cmake_minimum_required(VERSION 3.15.0)
pybind11_add_module(_pyhailort_internal SHARED
pyhailort_internal.cpp
control_api.cpp
- ${HAILORT_SRCS_ABS}
+ $<TARGET_OBJECTS:libhailort>
)
+add_dependencies(_pyhailort_internal libhailort)
+
set_target_properties(_pyhailort_internal PROPERTIES
CXX_STANDARD 14
CXX_STANDARD_REQUIRED YES
)
target_link_libraries(_pyhailort_internal PRIVATE
- libhailort
hef_proto
spdlog::spdlog
readerwriterqueue
py::bytes ControlWrapper::sensor_get_config(DeviceWrapper &device, uint32_t section_index, uint32_t offset, uint32_t data_length)
{
std::unique_ptr<std::string> response = make_unique_nothrow<std::string>(data_length, '\x00');
- VALIDATE_NOT_NULL(response);
+ VALIDATE_NOT_NULL(response, HAILO_OUT_OF_HOST_MEMORY);
auto status = Control::sensor_get_config(*device, section_index, offset, data_length, (uint8_t*)(response->data()));
VALIDATE_STATUS(status);
#ifndef _CONTROL_API_HPP_
#define _CONTROL_API_HPP_
-#include "control.hpp"
+#include "device_common/control.hpp"
#include "utils.hpp"
#include "device_api.hpp"
+
+
+#include "hailo/hailort.h"
+
+#include "transform/transform_internal.hpp"
+#include "bindings_common.hpp"
+
+#include "pyhailort_internal.hpp"
+#include "control_api.hpp"
+#include "utils.hpp"
+#include "utils.h"
+
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/detail/common.h>
#include <pybind11/functional.h>
#include <vector>
-#include "pyhailort_internal.hpp"
-#include "control_api.hpp"
-#include "utils.hpp"
-#include "utils.h"
-
-#include "hailo/hailort.h"
-#include "transform_internal.hpp"
-#include "bindings_common.hpp"
-
namespace hailort
{
+// TODO: Remove (HRT-9944)
+// Duplicated for hailo post process test with python API.
+static const uint32_t TEST_NUM_OF_CLASSES = 80;
-static const uint32_t TEST_NUM_OF_CLASSES2 = 80;
-py::array PyhailortInternal::get_yolov5_post_process_expected_buffer()
+Expected<Buffer> get_expected_buffer_float32()
{
static const uint32_t DETECTION_CLASS_ID_1 = 0;
static const float32_t CLASS_ID_1_DETECTION_COUNT = 5;
};
static const uint32_t DETECTION_COUNT = 9;
- auto buffer_size = (DETECTION_COUNT * sizeof(hailo_bbox_float32_t)) + (TEST_NUM_OF_CLASSES2 * sizeof(float32_t));
- auto buffer_expected = hailort::Buffer::create(buffer_size, 0);
- // CATCH_REQUIRE_EXPECTED(buffer_expected);
+ auto buffer_size = (DETECTION_COUNT * sizeof(hailo_bbox_float32_t)) + (TEST_NUM_OF_CLASSES * sizeof(float32_t));
+ auto buffer_expected = Buffer::create(buffer_size, 0);
+ CHECK_EXPECTED(buffer_expected);
auto buffer = buffer_expected.release();
size_t offset = 0;
- for (uint32_t class_index = 0; class_index < TEST_NUM_OF_CLASSES2; class_index++) {
+ for (uint32_t class_index = 0; class_index < TEST_NUM_OF_CLASSES; class_index++) {
if (DETECTION_CLASS_ID_1 == class_index) {
memcpy(buffer.data() + offset, &CLASS_ID_1_DETECTION_COUNT, sizeof(CLASS_ID_1_DETECTION_COUNT));
offset += sizeof(CLASS_ID_1_DETECTION_COUNT);
}
}
+ return buffer;
+}
+
+py::array PyhailortInternal::get_yolov5_post_process_expected_buffer()
+{
+ auto buffer = get_expected_buffer_float32();
+ VALIDATE_EXPECTED(buffer);
+
// Note: The ownership of the buffer is transferred to Python wrapped as a py::array.
// When the py::array isn't referenced anymore in Python and is destructed, the py::capsule's dtor
// is called too (and it deletes the raw buffer)
auto type = py::dtype(HailoRTBindingsCommon::convert_format_type_to_string(HAILO_FORMAT_TYPE_FLOAT32));
- auto shape = *py::array::ShapeContainer({buffer.size()});
- const auto unmanaged_addr = buffer.release();
+ auto shape = *py::array::ShapeContainer({buffer->size()});
+ const auto unmanaged_addr = buffer.release().release();
return py::array(type, shape, unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
}
py::list PyhailortInternal::get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name)
{
- auto network_group_metadata = hef.hef_ptr()->pimpl->get_network_group_metadata(net_group_name);
- VALIDATE_EXPECTED(network_group_metadata);
+ auto core_op_metadata = hef.hef_ptr()->pimpl->get_core_op_metadata(net_group_name);
+ VALIDATE_EXPECTED(core_op_metadata);
- return py::cast(network_group_metadata->get_all_layer_infos());
+ return py::cast(core_op_metadata->get_all_layer_infos());
}
PYBIND11_MODULE(_pyhailort_internal, m) {
#ifndef _PYHAILORT_INTERNAL_
#define _PYHAILORT_INTERNAL_
+#include "hef/hef_internal.hpp"
+
+#include "hef_api.hpp"
+#include "utils.hpp"
+#include "utils.h"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/detail/common.h>
#include <pybind11/functional.h>
#include <vector>
-#include "hef_internal.hpp"
-#include "hef_api.hpp"
-#include "utils.hpp"
-#include "utils.h"
namespace hailort
{
#ifndef _HAILO_NET_FLOW_API_HPP_
#define _HAILO_NET_FLOW_API_HPP_
+#include "hailo/hailort.h"
+
+#include "net_flow/ops/yolo_post_process.hpp"
+
#include "utils.hpp"
-#include "hailo/hailort.hpp"
#include "bindings_common.hpp"
-#include "net_flow/ops/yolo_post_processing.hpp"
+
namespace hailort
{
namespace net_flow
{
-class YOLOv5PostProcessingOpWrapper
+class YOLOv5PostProcessOpWrapper
{
public:
- static YOLOv5PostProcessingOpWrapper create(const std::vector<std::vector<int>> &anchors,
+ static YOLOv5PostProcessOpWrapper create(const std::vector<std::vector<int>> &anchors,
const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
const std::vector<hailo_quant_info_t> &quant_infos, float32_t image_height, float32_t image_width, float32_t confidence_threshold,
- float32_t iou_threshold, uint32_t num_of_classes, bool should_dequantize, uint32_t max_boxes, bool should_sigmoid,
- bool one_class_per_bbox=true)
+ float32_t iou_threshold, uint32_t num_of_classes, uint32_t max_boxes,
+ bool cross_classes=true)
{
- auto op = YOLOv5PostProcessingOp::create(anchors, shapes, formats, quant_infos, image_height, image_width,
- confidence_threshold, iou_threshold, num_of_classes, should_dequantize, max_boxes, should_sigmoid, one_class_per_bbox);
+ std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
+ std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+
+ net_flow::NmsPostProcessConfig nms_post_process_config{};
+ nms_post_process_config.nms_score_th = confidence_threshold;
+ nms_post_process_config.nms_iou_th = iou_threshold;
+ nms_post_process_config.max_proposals_per_class = max_boxes;
+ nms_post_process_config.classes = num_of_classes;
+ nms_post_process_config.background_removal = false;
+ nms_post_process_config.background_removal_index = 0;
+ nms_post_process_config.cross_classes = cross_classes;
+ net_flow::YoloPostProcessConfig yolo_post_process_config{};
+ yolo_post_process_config.image_height = image_height;
+ yolo_post_process_config.image_width = image_width;
+ // Each layer anchors vector is structured as {w,h} pairs.
+ for (size_t i = 0; i < anchors.size(); ++i) {
+ auto name = std::to_string(i);
+ yolo_post_process_config.anchors.insert({name, anchors[i]});
+ BufferMetaData input_metadata = {
+ shapes[i],
+ shapes[i],
+ formats[i],
+ quant_infos[i]
+ };
+ inputs_metadata.insert({name, input_metadata});
+ }
+ auto op = YOLOv5PostProcessOp::create(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config);
VALIDATE_EXPECTED(op);
-
- return YOLOv5PostProcessingOpWrapper(op.release(), num_of_classes, max_boxes);
+
+ return YOLOv5PostProcessOpWrapper(op.release(), num_of_classes, max_boxes);
}
static void add_to_python_module(py::module &m)
{
- py::class_<YOLOv5PostProcessingOpWrapper>(m, "YOLOv5PostProcessingOp")
- .def("create", &YOLOv5PostProcessingOpWrapper::create)
- .def("execute",[](YOLOv5PostProcessingOpWrapper &self, const std::vector<py::array> &tensors)
+ py::class_<YOLOv5PostProcessOpWrapper>(m, "YOLOv5PostProcessOp")
+ .def("create", &YOLOv5PostProcessOpWrapper::create)
+ .def("execute",[](YOLOv5PostProcessOpWrapper &self, const std::vector<py::array> &tensors)
{
- std::vector<MemoryView> data_views;
- data_views.reserve(tensors.size());
- for (auto &tensor : tensors) {
- data_views.push_back(MemoryView(const_cast<void*>(reinterpret_cast<const void*>(tensor.data())), tensor.nbytes()));
+ std::map<std::string, MemoryView> data_views;
+ for (size_t i = 0; i < tensors.size(); ++i) {
+ data_views.insert({std::to_string(i),
+ MemoryView(const_cast<void*>(reinterpret_cast<const void*>(tensors[i].data())), tensors[i].nbytes())});
}
hailo_nms_info_t nms_info = {
auto buffer = Buffer::create(HailoRTCommon::get_nms_host_frame_size(nms_info, output_format), 0);
VALIDATE_STATUS(buffer.status());
- auto status = self.m_post_processing_op.execute<float32_t>(data_views, MemoryView(buffer.value().data(), buffer.value().size()));
+ std::map<std::string, MemoryView> outputs;
+ outputs.insert({"", MemoryView(buffer.value().data(), buffer.value().size())});
+ auto status = self.m_post_processing_op->execute(data_views, outputs);
VALIDATE_STATUS(status);
// Note: The ownership of the buffer is transferred to Python wrapped as a py::array.
}
private:
- YOLOv5PostProcessingOpWrapper(YOLOv5PostProcessingOp &&post_processing_op, uint32_t num_of_classes, uint32_t max_bboxes)
+ YOLOv5PostProcessOpWrapper(std::shared_ptr<Op> post_processing_op, uint32_t num_of_classes, uint32_t max_bboxes)
: m_post_processing_op(post_processing_op),
m_num_of_classes(num_of_classes),
m_max_boxes(max_bboxes) {}
- YOLOv5PostProcessingOp m_post_processing_op;
+ std::shared_ptr<Op> m_post_processing_op;
uint32_t m_num_of_classes = 0;
uint32_t m_max_boxes = 0;
};
void NetFlow_api_initialize_python_module(py::module &m)
{
- YOLOv5PostProcessingOpWrapper::add_to_python_module(m);
+ YOLOv5PostProcessOpWrapper::add_to_python_module(m);
}
#include <exception>
using namespace std;
-#include "hailo/hailort.hpp"
+#include "hailo/hailort.h"
+#include "hailo/hailort_defaults.hpp"
#include "hef_api.hpp"
#include "vstream_api.hpp"
#include "bindings_common.hpp"
#include "sensor_config_exports.h"
-#include "hailort_defaults.hpp"
#if defined(__GNUC__)
#include "common/os/posix/traffic_control.hpp"
#endif
(first.order == second.order) &&
(first.flags == second.flags));
}
+
class UdpScan {
public:
UdpScan() = default;
.value("HAILO8_A0", HAILO_ARCH_HAILO8_A0)
.value("HAILO8", HAILO_ARCH_HAILO8)
.value("HAILO8L", HAILO_ARCH_HAILO8L)
- .value("MERCURY_CA", HAILO_ARCH_MERCURY_CA)
+ .value("HAILO15", HAILO_ARCH_HAILO15)
;
/* TODO: SDK-15648 */
.value("YYUV", HAILO_FORMAT_ORDER_HAILO_YYUV)
.value("NV21", HAILO_FORMAT_ORDER_NV21)
.value("YYVU", HAILO_FORMAT_ORDER_HAILO_YYVU)
+ .value("RGB4", HAILO_FORMAT_ORDER_RGB4)
+ .value("I420", HAILO_FORMAT_ORDER_I420)
+ .value("YYYYUV", HAILO_FORMAT_ORDER_HAILO_YYYYUV)
;
py::enum_<hailo_format_flags_t>(m, "FormatFlags", py::arithmetic())
.def(py::init<>())
;
- py::class_<hailo_core_input_stream_params_t>(m, "CoreInputStreamParams")
+ py::class_<hailo_integrated_input_stream_params_t>(m, "IntegratedInputStreamParams")
.def(py::init<>())
;
- py::class_<hailo_core_output_stream_params_t>(m, "CoreOutputStreamParams")
+ py::class_<hailo_integrated_output_stream_params_t>(m, "IntegratedOutputStreamParams")
.def(py::init<>())
;
py::enum_<hailo_stream_interface_t>(m, "StreamInterface")
.value("PCIe", HAILO_STREAM_INTERFACE_PCIE)
- .value("CORE", HAILO_STREAM_INTERFACE_CORE)
+ .value("INTEGRATED", HAILO_STREAM_INTERFACE_INTEGRATED)
.value("ETH", HAILO_STREAM_INTERFACE_ETH)
.value("MIPI", HAILO_STREAM_INTERFACE_MIPI)
;
py::class_<hailo_activate_network_group_params_t>(m, "ActivateNetworkGroupParams")
.def(py::init<>())
.def_static("default", []() {
- return HailoRTDefaults::get_network_group_params();
+ return HailoRTDefaults::get_active_network_group_params();
});
;
+ py::enum_<hailo_scheduling_algorithm_t>(m, "SchedulingAlgorithm")
+ .value("NONE", HAILO_SCHEDULING_ALGORITHM_NONE)
+ .value("ROUND_ROBIN", HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN)
+ ;
+
py::class_<VDeviceParamsWrapper>(m, "VDeviceParams")
.def(py::init<>())
+ // Add device_ids
.def_property("device_count",
[](const VDeviceParamsWrapper& params) -> uint32_t {
return params.orig_params.device_count;
params.orig_params.device_count = device_count;
}
)
+ .def_property("scheduling_algorithm",
+ [](const VDeviceParamsWrapper& params) -> uint32_t {
+ return params.orig_params.scheduling_algorithm;
+ },
+ [](VDeviceParamsWrapper& params, hailo_scheduling_algorithm_t scheduling_algorithm) {
+ params.orig_params.scheduling_algorithm = scheduling_algorithm;
+ }
+ )
.def_property("group_id",
[](const VDeviceParamsWrapper& params) -> py::str {
return std::string(params.orig_params.group_id);
params.orig_params.group_id = params.group_id_str.c_str();
}
)
+ .def_property("multi_process_service",
+ [](const VDeviceParamsWrapper& params) -> uint32_t {
+ return params.orig_params.multi_process_service;
+ },
+ [](VDeviceParamsWrapper& params, bool multi_process_service) {
+ params.orig_params.multi_process_service = multi_process_service;
+ }
+ )
.def_static("default", []() {
auto orig_params = HailoRTDefaults::get_vdevice_params();
orig_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
.def_readonly("direction", &hailo_stream_parameters_t::direction)
STREAM_PARAMETERS_UNION_PROPERTY(pcie_input_params, hailo_pcie_input_stream_params_t,
HAILO_STREAM_INTERFACE_PCIE, HAILO_H2D_STREAM)
- STREAM_PARAMETERS_UNION_PROPERTY(core_input_params, hailo_core_input_stream_params_t,
- HAILO_STREAM_INTERFACE_CORE, HAILO_H2D_STREAM)
+ STREAM_PARAMETERS_UNION_PROPERTY(integrated_input_params, hailo_integrated_input_stream_params_t,
+ HAILO_STREAM_INTERFACE_INTEGRATED, HAILO_H2D_STREAM)
STREAM_PARAMETERS_UNION_PROPERTY(eth_input_params, hailo_eth_input_stream_params_t,
HAILO_STREAM_INTERFACE_ETH, HAILO_H2D_STREAM)
STREAM_PARAMETERS_UNION_PROPERTY(mipi_input_params, hailo_mipi_input_stream_params_t,
HAILO_STREAM_INTERFACE_PCIE, HAILO_D2H_STREAM)
STREAM_PARAMETERS_UNION_PROPERTY(eth_output_params, hailo_eth_output_stream_params_t,
HAILO_STREAM_INTERFACE_ETH, HAILO_D2H_STREAM)
- STREAM_PARAMETERS_UNION_PROPERTY(core_output_params, hailo_core_output_stream_params_t,
- HAILO_STREAM_INTERFACE_CORE, HAILO_D2H_STREAM)
+ STREAM_PARAMETERS_UNION_PROPERTY(integrated_output_params, hailo_integrated_output_stream_params_t,
+ HAILO_STREAM_INTERFACE_INTEGRATED, HAILO_D2H_STREAM)
;
py::class_<hailo_network_parameters_t>(m, "NetworkParameters")
.def_readonly("name", &hailo_stream_info_t::name)
.def_readonly("sys_index", &hailo_stream_info_t::index)
.def_readonly("data_bytes", &hailo_stream_info_t::hw_data_bytes)
+ .def_readonly("quant_info", &hailo_stream_info_t::quant_info)
.def("__repr__", [](const hailo_stream_info_t &self) {
return std::string("StreamInfo(\"") + std::string(self.name) + std::string("\")");
})
* @brief Quantization python bindings functions
**/
+#include "hailo/quantization.hpp"
+
#include "quantization_api.hpp"
#include "bindings_common.hpp"
#ifndef _HAILO_QUANTIZATION_API_HPP_
#define _HAILO_QUANTIZATION_API_HPP_
-#include "hailo/hailort.hpp"
+#include "hailo/hailort.h"
+
#include "utils.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
+
namespace hailort
{
} \
} while (0)
-#define VALIDATE_NOT_NULL(__ptr) \
+#define VALIDATE_NOT_NULL(__ptr, __status) \
do { \
if (nullptr == (__ptr)) { \
- throw HailoRTStatusException(std::to_string(HAILO_INVALID_ARGUMENT)); \
+ throw HailoRTStatusException(std::to_string(__status)); \
} \
} while (0)
#include "hailo/hef.hpp"
#include "hailo/vdevice.hpp"
+#include "hailo/hailort_common.hpp"
-#include "utils.hpp"
#include "common/logger_macros.hpp"
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "service/rpc_client_utils.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+#include "utils.hpp"
+
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/detail/common.h>
#include <string>
+
namespace hailort
{
return VDeviceWrapper(params.orig_params);
}
+ static VDeviceWrapper create(const VDeviceParamsWrapper ¶ms, const std::vector<std::string> &device_ids)
+ {
+ if (params.orig_params.device_ids != nullptr && (!device_ids.empty())) {
+ LOGGER__ERROR("VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor");
+ throw HailoRTStatusException(std::to_string(HAILO_INVALID_OPERATION));
+ }
+ auto modified_params = params;
+ auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
+ VALIDATE_EXPECTED(device_ids_vector);
+ modified_params.orig_params.device_ids = device_ids_vector->data();
+ return VDeviceWrapper(modified_params.orig_params);
+ }
+
static VDeviceWrapper create_from_ids(const std::vector<std::string> &device_ids)
{
auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
VALIDATE_EXPECTED(network_groups);
py::list results;
+ m_net_groups.reserve(m_net_groups.size() + network_groups->size());
for (const auto &network_group : network_groups.value()) {
results.append(network_group.get());
+ m_net_groups.emplace_back(network_group);
}
return results;
void release()
{
+ m_net_groups.clear();
m_vdevice.reset();
}
+ void before_fork()
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ if (m_vdevice != nullptr) {
+ auto status = m_vdevice->before_fork();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+
+ void after_fork_in_parent()
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ if (m_vdevice != nullptr) {
+ auto status = m_vdevice->after_fork_in_parent();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+
+ void after_fork_in_child()
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ if (m_vdevice != nullptr) {
+ auto status = m_vdevice->after_fork_in_child();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+
private:
std::unique_ptr<VDevice> m_vdevice;
+ ConfiguredNetworkGroupVector m_net_groups;
};
void VDevice_api_initialize_python_module(py::module &m)
py::class_<VDeviceWrapper>(m, "VDevice")
.def("create", py::overload_cast<const hailo_vdevice_params_t&>(&VDeviceWrapper::create))
.def("create", py::overload_cast<const VDeviceParamsWrapper&>(&VDeviceWrapper::create))
+ .def("create", py::overload_cast<const VDeviceParamsWrapper&, const std::vector<std::string>&>(&VDeviceWrapper::create))
.def("create_from_ids", &VDeviceWrapper::create_from_ids)
.def("get_physical_devices_ids", &VDeviceWrapper::get_physical_devices_ids)
.def("configure", &VDeviceWrapper::configure)
.def("release", &VDeviceWrapper::release)
+ .def("before_fork", &VDeviceWrapper::before_fork)
+ .def("after_fork_in_parent", &VDeviceWrapper::after_fork_in_parent)
+ .def("after_fork_in_child", &VDeviceWrapper::after_fork_in_child)
;
}
MemoryView(const_cast<void*>(reinterpret_cast<const void*>(data.data())), data.nbytes()));
VALIDATE_STATUS(status);
})
+ .def("before_fork", [](InputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.before_fork();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
+ .def("after_fork_in_parent", [](InputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_parent();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
+ .def("after_fork_in_child", [](InputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_child();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
.def("flush", [](InputVStream &self)
{
hailo_status status = self.flush();
VALIDATE_STATUS(status);
}
+void InputVStreamsWrapper::before_fork()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &input_vstream : m_input_vstreams) {
+ auto status = input_vstream.second->before_fork();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
+void InputVStreamsWrapper::after_fork_in_parent()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &input_vstream : m_input_vstreams) {
+ auto status = input_vstream.second->after_fork_in_parent();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
+void InputVStreamsWrapper::after_fork_in_child()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &input_vstream : m_input_vstreams) {
+ auto status = input_vstream.second->after_fork_in_child();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
void InputVStreamsWrapper::add_to_python_module(py::module &m)
{
py::class_<InputVStreamsWrapper>(m, "InputVStreams")
.def("clear", &InputVStreamsWrapper::clear)
.def("__enter__", &InputVStreamsWrapper::enter, py::return_value_policy::reference)
.def("__exit__", [&](InputVStreamsWrapper &self, py::args) { self.exit(); })
+ .def("before_fork", &InputVStreamsWrapper::before_fork)
+ .def("after_fork_in_parent", &InputVStreamsWrapper::after_fork_in_parent)
+ .def("after_fork_in_child", &InputVStreamsWrapper::after_fork_in_child)
;
}
return py::array(get_dtype(self), get_shape(self), unmanaged_addr,
py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
})
+ .def("before_fork", [](OutputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.before_fork();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
+ .def("after_fork_in_parent", [](OutputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_parent();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
+ .def("after_fork_in_child", [](OutputVStream &self)
+ {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ auto status = self.after_fork_in_child();
+ VALIDATE_STATUS(status);
+#else
+ (void)self;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+ }
+ )
.def_property_readonly("info", [](OutputVStream &self)
{
return self.get_info();
VALIDATE_STATUS(status);
}
+void OutputVStreamsWrapper::before_fork()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &output_vstream : m_output_vstreams) {
+ auto status = output_vstream.second->before_fork();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
+void OutputVStreamsWrapper::after_fork_in_parent()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &output_vstream : m_output_vstreams) {
+ auto status = output_vstream.second->after_fork_in_parent();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
+void OutputVStreamsWrapper::after_fork_in_child()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ for (auto &output_vstream : m_output_vstreams) {
+ auto status = output_vstream.second->after_fork_in_child();
+ VALIDATE_STATUS(status);
+ }
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+}
+
void OutputVStreamsWrapper::add_to_python_module(py::module &m)
{
py::class_<OutputVStreamsWrapper>(m, "OutputVStreams")
.def("clear", &OutputVStreamsWrapper::clear)
.def("__enter__", &OutputVStreamsWrapper::enter, py::return_value_policy::reference)
.def("__exit__", [&](OutputVStreamsWrapper &self, py::args) { self.exit(); })
+ .def("before_fork", &OutputVStreamsWrapper::before_fork)
+ .def("after_fork_in_parent", &OutputVStreamsWrapper::after_fork_in_parent)
+ .def("after_fork_in_child", &OutputVStreamsWrapper::after_fork_in_child)
;
}
std::shared_ptr<InputVStream> get_input_by_name(const std::string &name);
py::dict get_all_inputs();
void clear();
+ void before_fork();
+ void after_fork_in_parent();
+ void after_fork_in_child();
static void add_to_python_module(py::module &m);
private:
void exit();
py::dict get_all_outputs();
void clear();
+ void before_fork();
+ void after_fork_in_parent();
+ void after_fork_in_child();
static void add_to_python_module(py::module &m);
private:
- this example uses a UDP device.
- `raw_streams_example` - Basic inference of a shortcut network using raw stream api.
    The data is transformed before being sent and after being received, in the same thread that sends/receives, using the transformation API.
+ - `notification_callback_example` - Demonstrates how to work with notification callbacks.
- C++ examples:
- `vstreams_example` - Basic inference of a shortcut network, same as `vstreams_example` C example, uses HailoRT C++ api.
- `raw_streams_example` - Basic inference of a shortcut network, same as `raw_streams_example` C example, uses HailoRT C++ api.
  - `multi_process_example` - Demonstrates how to work with HailoRT as a service and how to use the HailoRT Model Scheduler for network group switching.
Using the script `multi_process_example.sh` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` for more information.
-
+ - `notification_callback_example` - Demonstrates how to work with notification callbacks, same as `notification_callback_example` C example.
## Compiling with CMake
Examples are configured and compiled using the following commands:
```sh
add_subdirectory(switch_network_groups_manually_example)
add_subdirectory(multi_device_example)
add_subdirectory(power_measurement_example)
+add_subdirectory(notification_callback_example)
add_custom_target(c_hailort_examples)
add_dependencies(c_hailort_examples
c_switch_network_groups_example
c_switch_network_groups_manually_example
c_multi_device_example
- c_power_measurement_example)
\ No newline at end of file
+ c_power_measurement_example
+ c_notification_callback_example)
\ No newline at end of file
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(data_quantization_example.c PROPERTIES LANGUAGE C)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(infer_pipeline_example.c PROPERTIES LANGUAGE C)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(multi_device_example.c PROPERTIES LANGUAGE C)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(multi_network_vstream_example.c PROPERTIES LANGUAGE C)
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+find_package(Threads REQUIRED)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
+
+SET_SOURCE_FILES_PROPERTIES(notification_callback_example.c PROPERTIES LANGUAGE C)
+
+add_executable(c_notification_callback_example notification_callback_example.c)
+target_link_libraries(c_notification_callback_example PRIVATE HailoRT::libhailort Threads::Threads)
+target_include_directories(c_notification_callback_example PRIVATE "${CMAKE_CURRENT_LIST_DIR}/../common")
+
+if(WIN32)
+ target_compile_options(c_notification_callback_example PRIVATE
+ /DWIN32_LEAN_AND_MEAN
+ /DNOMINMAX # NOMINMAX is required in order to play nice with std::min/std::max (otherwise Windows.h defines it's own)
+ /wd4201 /wd4251
+ )
+endif()
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file notification_callback_example.c
+ * This example demonstrates the basic usage of notification callbacks.
+ * The program creates a device and then sets and removes a notification callback on it.
+ * In this example the notification is HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM and the callback is a simple print function.
+ **/
+
+#include "common.h"
+#include "hailo_thread.h"
+#include "hailo/hailort.h"
+
+#define DEVICE_IDS_COUNT (16)
+
+// Portable sleep helper for the example.
+// @param duration_seconds  Time to sleep, in whole seconds.
+// NOTE: Win32 Sleep() takes *milliseconds* (unlike POSIX sleep(), which takes
+// seconds), so the duration must be converted on Windows; without the
+// conversion the example would wait only duration_seconds milliseconds.
+void sleep_seconds(uint32_t duration_seconds)
+{
+#if defined(__unix__) || defined(__QNX__)
+    sleep(duration_seconds);
+#else
+    Sleep(duration_seconds * 1000);
+#endif
+}
+
+// Notification callback registered with hailo_set_notification_callback().
+// Prints the received notification id and the id of the device that raised it.
+// @param device        The device that generated the notification.
+// @param notification  The notification payload; only its id is used here.
+// @param opaque        User context supplied at registration (NULL in this example).
+void callback(hailo_device device, const hailo_notification_t *notification, void *opaque)
+{
+    hailo_device_id_t device_id = {0};
+    // Query the device id so the printout identifies which device alarmed
+    hailo_status status = hailo_get_device_id(device, &device_id);
+    if (HAILO_SUCCESS != status){
+        printf("Couldn't get device id\n");
+        return;
+    }
+    printf("got a notification with notification id %d - Overcurrent Alarm\n", notification->id);
+    printf("device id: %s\n", device_id.id);
+    // NULL is expected here: the example registers the callback with NULL user data
+    if(NULL == opaque)
+        printf("User defined data is null\n");
+}
+
+// Example flow: scan for devices -> create the first one -> register an
+// overcurrent-alarm notification callback -> wait briefly (callback fires if
+// the alarm occurs) -> remove the callback -> release the device.
+// The process exit code is the final hailo_status value (0 == HAILO_SUCCESS).
+int main()
+{
+    hailo_device_id_t device_ids[DEVICE_IDS_COUNT];
+    size_t actual_devices_count = DEVICE_IDS_COUNT;
+    hailo_status status = HAILO_UNINITIALIZED;
+    hailo_device device = NULL;
+
+    // Scan to find a device
+    status = hailo_scan_devices(NULL, device_ids, &actual_devices_count);
+    REQUIRE_SUCCESS(status, l_exit, "Failed to scan devices");
+    REQUIRE_ACTION(1 <= actual_devices_count, status = HAILO_INVALID_OPERATION, l_exit,
+        "Failed to find a connected hailo device.");
+
+    // Create the device
+    status = hailo_create_device_by_id(&(device_ids[0]), &device);
+    REQUIRE_SUCCESS(status, l_exit, "Failed to create device");
+
+    // Set the callback function to the notification id HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM
+    hailo_notification_callback callback_func = &callback;
+    status = hailo_set_notification_callback(device, callback_func, HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM, NULL);
+    REQUIRE_SUCCESS(status, l_release_device, "Failed to set notification callback");
+
+    // In this part of the program - in case of HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM notification, the callback function will be called.
+    printf("Notification callback has been set - ");
+    printf("in case of overcurrent alarm notification, an overcurrent alarm message will be printed\n");
+    sleep_seconds(2);
+
+    // Remove the callback notification
+    status = hailo_remove_notification_callback(device, HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM);
+    REQUIRE_SUCCESS(status, l_release_device, "Failed to remove notification callback");
+    printf("Notification callback has been removed\n");
+
+// goto-based cleanup: the device is always released, even on the error paths above
+l_release_device:
+    (void) hailo_release_device(device);
+l_exit:
+    return status;
+}
cmake_minimum_required(VERSION 3.0.0)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(power_measurement_example.c PROPERTIES LANGUAGE C)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(raw_streams_example.c PROPERTIES LANGUAGE C)
#define HEF_FILE ("hefs/shortcut_net.hef")
#define INFER_FRAME_COUNT (200)
#define MAX_EDGE_LAYERS (16)
+#define DEVICE_IDS_COUNT (16)
typedef struct write_thread_args_t {
hailo_input_stream *input_stream;
int main()
{
hailo_status status = HAILO_UNINITIALIZED;
- hailo_device_id_t device_id = {0};
- size_t actual_devices_count = 1;
+ hailo_device_id_t device_ids[DEVICE_IDS_COUNT];
+ size_t actual_devices_count = DEVICE_IDS_COUNT;
hailo_device device = NULL;
hailo_hef hef = NULL;
hailo_configure_params_t configure_params = {0};
size_t number_output_streams = 0;
size_t index = 0;
- status = hailo_scan_devices(NULL, &device_id, &actual_devices_count);
+ status = hailo_scan_devices(NULL, device_ids, &actual_devices_count);
REQUIRE_SUCCESS(status, l_exit, "Failed to scan devices");
- REQUIRE_ACTION(1 == actual_devices_count, status = HAILO_INVALID_OPERATION, l_exit,
- "Only 1 device on the system is supported on the example");
+ REQUIRE_ACTION(1 <= actual_devices_count, status = HAILO_INVALID_OPERATION, l_exit,
+ "Failed to find a connected hailo device.");
- status = hailo_create_device_by_id(&device_id, &device);
+ status = hailo_create_device_by_id(&(device_ids[0]), &device);
REQUIRE_SUCCESS(status, l_exit, "Failed to create device");
status = hailo_create_hef_file(&hef, HEF_FILE);
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(switch_network_groups_example.c PROPERTIES LANGUAGE C)
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(switch_network_groups_manually_example.c PROPERTIES LANGUAGE C)
for (size_t run_index = 0; run_index < RUN_COUNT; run_index++) {
for (size_t hef_index = 0 ; hef_index < HEF_COUNT; hef_index++) {
// Wait for hef to be activated to send data
- hailo_wait_for_network_group_activation(input_vstream_args->configured_networks[hef_index], HAILO_INFINITE);
+ status = hailo_wait_for_network_group_activation(input_vstream_args->configured_networks[hef_index], HAILO_INFINITE);
+ REQUIRE_SUCCESS(status, l_clear_src, "Failed waiting for network group activation");
// Send data on relevant Hef
for (uint32_t frame = 0; frame < INFER_FRAME_COUNT; frame++) {
for (size_t run_index = 0; run_index < RUN_COUNT; run_index++) {
for (size_t hef_index = 0 ; hef_index < HEF_COUNT; hef_index++) {
- // Wait for hef to be activated to send data
- hailo_wait_for_network_group_activation(output_vstream_args->configured_networks[hef_index], HAILO_INFINITE);
-
+ // Wait for hef to be activated to recv data
+ status = hailo_wait_for_network_group_activation(output_vstream_args->configured_networks[hef_index], HAILO_INFINITE);
+ REQUIRE_SUCCESS(status, l_clear_dst, "Failed waiting for network group activation");
+
for (uint32_t i = 0; i < INFER_FRAME_COUNT; i++) {
// Read data
status = hailo_vstream_read_raw_buffer(output_vstreams[hef_index],
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
SET_SOURCE_FILES_PROPERTIES(vstreams_example.c PROPERTIES LANGUAGE C)
add_subdirectory(vstreams_example)
add_subdirectory(infer_pipeline_example)
add_subdirectory(raw_streams_example)
+add_subdirectory(raw_async_streams_example)
add_subdirectory(multi_network_vstream_example)
add_subdirectory(switch_network_groups_example)
add_subdirectory(switch_network_groups_manually_example)
add_subdirectory(multi_device_example)
add_subdirectory(power_measurement_example)
add_subdirectory(multi_process_example)
+add_subdirectory(notification_callback_example)
add_custom_target(cpp_hailort_examples)
add_dependencies(cpp_hailort_examples
cpp_vstreams_example
cpp_infer_pipeline_example
cpp_raw_streams_example
+ cpp_raw_async_streams_example
cpp_multi_network_vstream_example
cpp_switch_network_groups_example
cpp_switch_network_groups_manually_example
cpp_multi_device_example
cpp_power_measurement_example
- cpp_multi_process_example)
\ No newline at end of file
+ cpp_multi_process_example
+ cpp_notification_callback_example)
\ No newline at end of file
cmake_minimum_required(VERSION 3.0.0)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_infer_pipeline_example infer_pipeline_example.cpp)
target_link_libraries(cpp_infer_pipeline_example PRIVATE HailoRT::libhailort)
#include <iostream>
+
#define HEF_FILE ("hefs/shortcut_net.hef")
constexpr size_t FRAMES_COUNT = 100;
constexpr hailo_format_type_t FORMAT_TYPE = HAILO_FORMAT_TYPE_AUTO;
return make_unexpected(hef.status());
}
- auto configure_params = hef->create_configure_params(HAILO_STREAM_INTERFACE_ETH);
+ auto configure_params = device.create_configure_params(hef.value());
if (!configure_params) {
return make_unexpected(configure_params.status());
}
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_multi_device_example multi_device_example.cpp)
target_link_libraries(cpp_multi_device_example PRIVATE HailoRT::libhailort Threads::Threads)
#include "hailo/hailort.hpp"
#include <iostream>
+#include <thread>
+
#define HEF_FILE ("hefs/shortcut_net.hef")
constexpr size_t FRAMES_COUNT = 100;
return make_unexpected(hef.status());
}
- auto configure_params = hef->create_configure_params(HAILO_STREAM_INTERFACE_PCIE);
+ auto configure_params = vdevice.create_configure_params(hef.value());
if (!configure_params) {
return make_unexpected(configure_params.status());
}
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_multi_network_vstream_example multi_network_vstream_example.cpp)
target_link_libraries(cpp_multi_network_vstream_example PRIVATE HailoRT::libhailort Threads::Threads)
#include "hailo/hailort.hpp"
#include <iostream>
+#include <thread>
+
#define HEF_FILE ("hefs/multi_network_shortcut_net.hef")
constexpr size_t INFER_FRAME_COUNT = 100;
Expected<std::shared_ptr<ConfiguredNetworkGroup>> configure_network_group(VDevice &vdevice, Hef &hef, uint16_t batch_size[NET_COUNT])
{
- auto configure_params = hef.create_configure_params(HAILO_STREAM_INTERFACE_PCIE);
+ auto configure_params = vdevice.create_configure_params(hef);
if (!configure_params) {
std::cerr << "Failed to create configure params" << std::endl;
return make_unexpected(configure_params.status());
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_multi_process_example multi_process_example.cpp)
target_link_libraries(cpp_multi_process_example PRIVATE HailoRT::libhailort Threads::Threads)
#include "hailo/hailort.hpp"
#include <iostream>
+#include <thread>
+
constexpr size_t FRAMES_COUNT = 100;
constexpr bool QUANTIZED = true;
return make_unexpected(hef.status());
}
- auto configure_params = hef->create_configure_params(HAILO_STREAM_INTERFACE_PCIE);
+ auto configure_params = vdevice.create_configure_params(hef.value());
if (!configure_params) {
return make_unexpected(configure_params.status());
}
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
+
+add_executable(cpp_notification_callback_example notification_callback_example.cpp)
+target_link_libraries(cpp_notification_callback_example PRIVATE HailoRT::libhailort)
+
+if(WIN32)
+ target_compile_options(cpp_notification_callback_example PRIVATE
+ /DWIN32_LEAN_AND_MEAN
+ /DNOMINMAX # NOMINMAX is required in order to play nice with std::min/std::max (otherwise Windows.h defines its own)
+ /wd4201 /wd4251
+ )
+endif()
+
+set_target_properties(cpp_notification_callback_example PROPERTIES CXX_STANDARD 14)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file notification_callback_example.cpp
+ * This example demonstrates the basic usage of notification callbacks.
+ * The program creates a device and then sets and removes a notification callback on it.
+ * In this example the notification is HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM and the callback is a simple print function.
+ **/
+
+#include "hailo/hailort.hpp"
+
+#include <iostream>
+#include <string>
+#include <chrono>
+#include <thread>
+
+
+const std::chrono::seconds SLEEP_DURATION_SECS(2);
+
+using namespace hailort;
+
+int main()
+{
+ // Scan for all connected Hailo devices; Device::scan() returns an Expected list of device ids
+ auto device_ids = Device::scan();
+ if (!device_ids) {
+ std::cerr << "Failed to scan, status = " << device_ids.status() << std::endl;
+ return device_ids.status();
+ }
+ if (device_ids->size() < 1){
+ std::cerr << "Failed to find a connected hailo device." << std::endl;
+ return HAILO_INVALID_OPERATION;
+ }
+ // Create a device object from the first scanned id
+ auto device = Device::create(device_ids->at(0));
+ if (!device) {
+ std::cerr << "Failed to create device " << device.status() << std::endl;
+ return device.status();
+ }
+
+ // Set the callback notification. The lambda runs on HailoRT's notification thread each time
+ // the device emits HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM; the trailing nullptr
+ // is the user-defined opaque pointer forwarded to the callback's last argument.
+ hailo_status status = device.value()->set_notification_callback(
+ [] (Device &device, const hailo_notification_t &notification, void* opaque) {
+ std::cout << "got notification with notification id " << notification.id << " - Overcurrent Alarm" << std::endl;
+ std::cout << "device id: " << device.get_dev_id() << std::endl;
+ if (nullptr == opaque) {
+ std::cout << "User defined data is null" << std::endl;
+ }
+ },
+ HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM, nullptr);
+ if (HAILO_SUCCESS != status) {
+ std::cerr << "Setting notification failed " << status << std::endl;
+ return status;
+ }
+
+ std::cout << "Notification callback has been set - ";
+ std::cout << "in case of overcurrent alarm notification, an overcurrent alarm will be printed" << std::endl;
+ // Keep the process alive for a while so a notification can arrive while the callback is registered
+ std::this_thread::sleep_for(SLEEP_DURATION_SECS);
+
+ // Remove the callback notification
+ status = device.value()->remove_notification_callback(HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM);
+ if (HAILO_SUCCESS != status) {
+ std::cerr << "Removing notification failed " << status << std::endl;
+ return status;
+ }
+ std::cout << "Notification callback has been removed" << std::endl;
+
+ return HAILO_SUCCESS;
+}
\ No newline at end of file
cmake_minimum_required(VERSION 3.0.0)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_power_measurement_example power_measurement_example.cpp)
target_link_libraries(cpp_power_measurement_example PRIVATE HailoRT::libhailort)
#include <chrono>
#include <thread>
+
#define SAMPLING_PERIOD (HAILO_SAMPLING_PERIOD_1100US)
#define AVERAGE_FACTOR (HAILO_AVERAGE_FACTOR_256)
#define DVM_OPTION (HAILO_DVM_OPTIONS_AUTO) // For current measurement over EVB - pass DVM explicitly (see hailo_dvm_options_t)
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+find_package(Threads REQUIRED)
+set(THREADS_PREFER_PTHREAD_FLAG ON)
+
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
+
+add_executable(cpp_raw_async_streams_example buffer_pool.cpp raw_async_streams_example.cpp)
+target_link_libraries(cpp_raw_async_streams_example PRIVATE HailoRT::libhailort Threads::Threads)
+
+if(WIN32)
+ target_compile_options(cpp_raw_async_streams_example PRIVATE
+ /DWIN32_LEAN_AND_MEAN
+ /DNOMINMAX # NOMINMAX is required in order to play nice with std::min/std::max (otherwise Windows.h defines its own)
+ /wd4201 /wd4251
+ )
+endif()
+
+set_target_properties(cpp_raw_async_streams_example PROPERTIES CXX_STANDARD 14)
\ No newline at end of file
--- /dev/null
+/**\r
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file buffer_pool.cpp\r
+ * @brief Implementation of vdma buffer pool\r
+ **/\r
+\r
+#include "buffer_pool.hpp"\r
+#include "hailo/hailort.h"\r
+#include "hailo/expected.hpp"\r
+\r
+// Allocates and DMA-maps num_buffers buffers of buffer_size bytes each for the given\r
+// device/direction flags, and wraps them in a FIFO BufferPool.\r
+// Returns HAILO_OUT_OF_HOST_MEMORY on host allocation failure, or the mapping error status.\r
+Expected<BufferPoolPtr> BufferPool::create(size_t num_buffers, size_t buffer_size,\r
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device)\r
+{\r
+ std::queue<std::shared_ptr<DmaMappedBuffer>> queue;\r
+ // size_t index (was 'auto i = 0', i.e. int): avoids a signed/unsigned comparison with num_buffers\r
+ for (size_t i = 0; i < num_buffers; i++) {\r
+ auto mapped_buffer = DmaMappedBuffer::create(buffer_size, data_direction_flags, device);\r
+ if (!mapped_buffer) {\r
+ return make_unexpected(mapped_buffer.status());\r
+ }\r
+\r
+ auto mapped_buffer_ptr = std::make_shared<DmaMappedBuffer>(mapped_buffer.release());\r
+ if (nullptr == mapped_buffer_ptr) {\r
+ return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);\r
+ }\r
+\r
+ queue.push(mapped_buffer_ptr);\r
+ }\r
+\r
+ auto result = std::make_shared<BufferPool>(num_buffers, std::move(queue));\r
+ if (nullptr == result) {\r
+ return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);\r
+ }\r
+\r
+ return result;\r
+}\r
+\r
+// Constructs the pool from a pre-filled queue; max_size is the total number of buffers.\r
+BufferPool::BufferPool(size_t max_size, std::queue<std::shared_ptr<DmaMappedBuffer>> &&queue) :\r
+ m_max_size(max_size),\r
+ m_mutex(),\r
+ m_cv(),\r
+ m_queue(std::move(queue)) // was m_queue(queue): an rvalue-reference parameter must be moved, else the queue is copied\r
+{}\r
+\r
+BufferPool::~BufferPool()\r
+{\r
+ // NOTE(review): wakes any blocked waiters, but destroying the condition variable while\r
+ // threads still wait on it is undefined behavior - callers must ensure no thread is\r
+ // blocked in dequeue()/enqueue()/wait_for_pending_buffers() at destruction time.\r
+ m_cv.notify_all();\r
+}\r
+\r
+// Blocks until the pool is non-empty, then pops and returns the oldest buffer (FIFO).\r
+std::shared_ptr<DmaMappedBuffer> BufferPool::dequeue()\r
+{\r
+ std::unique_lock<std::mutex> lock(m_mutex);\r
+ m_cv.wait(lock, [this] { return m_queue.size() > 0; });\r
+ auto buffer = m_queue.front();\r
+ m_queue.pop();\r
+\r
+ return buffer;\r
+}\r
+// Returns a buffer to the pool and wakes one waiter. The wait can only block if more\r
+// buffers than m_max_size were ever pushed, which create() never does.\r
+void BufferPool::enqueue(std::shared_ptr<DmaMappedBuffer> buffer)\r
+{\r
+ {\r
+ std::unique_lock<std::mutex> lock(m_mutex);\r
+ m_cv.wait(lock, [this] { return m_max_size > m_queue.size(); });\r
+ m_queue.push(buffer);\r
+ }\r
+\r
+ m_cv.notify_one();\r
+}\r
+\r
+// Blocks until every buffer has been returned to the pool (queue size == m_max_size),\r
+// i.e. all in-flight transfers have completed and recycled their buffers.\r
+void BufferPool::wait_for_pending_buffers()\r
+{\r
+ std::unique_lock<std::mutex> lock(m_mutex);\r
+ m_cv.wait(lock, [this] { return m_max_size == m_queue.size(); });\r
+}\r
--- /dev/null
+/**\r
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file buffer_pool.hpp\r
+ * @brief Pool of vdma mapped buffers, allowing FIFO queue access to buffers\r
+ **/\r
+\r
+#ifndef _HAILO_BUFFER_POOL_HPP_\r
+#define _HAILO_BUFFER_POOL_HPP_\r
+\r
+#include "hailo/hailort.hpp"\r
+#include "hailo/expected.hpp"\r
+\r
+#include <memory>\r
+#include <mutex>\r
+#include <condition_variable>\r
+#include <queue>\r
+\r
+\r
+using namespace hailort;\r
+\r
+class BufferPool;\r
+using BufferPoolPtr = std::shared_ptr<BufferPool>;\r
+\r
+class BufferPool final\r
+{\r
+public:\r
+ // Allocates num_buffers DMA-mapped buffers of buffer_size bytes each for the given device/direction.\r
+ static Expected<BufferPoolPtr> create(size_t num_buffers, size_t buffer_size,\r
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device);\r
+ // Public so create() can use std::make_shared; prefer calling create() directly.\r
+ BufferPool(size_t max_size, std::queue<std::shared_ptr<DmaMappedBuffer>> &&queue);\r
+ // Non-copyable and non-movable: the pool is shared via BufferPoolPtr.\r
+ BufferPool(BufferPool &&) = delete;\r
+ BufferPool(const BufferPool &) = delete;\r
+ BufferPool &operator=(BufferPool &&) = delete;\r
+ BufferPool &operator=(const BufferPool &) = delete;\r
+ ~BufferPool();\r
+\r
+ // Blocks until a buffer is available; pops and returns the oldest one (FIFO).\r
+ std::shared_ptr<DmaMappedBuffer> dequeue();\r
+ // Blocks until there is room, then returns the buffer to the pool.\r
+ void enqueue(std::shared_ptr<DmaMappedBuffer> buffer);\r
+ // Blocks until all buffers have been returned to the pool.\r
+ void wait_for_pending_buffers();\r
+\r
+private:\r
+ const size_t m_max_size;\r
+ std::mutex m_mutex;\r
+ std::condition_variable m_cv;\r
+ std::queue<std::shared_ptr<DmaMappedBuffer>> m_queue;\r
+};\r
+\r
+#endif /* _HAILO_BUFFER_POOL_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file raw_async_streams_example.cpp
+ * This example demonstrates using low level async streams over c++
+ **/
+
+#include "hailo/hailort.hpp"
+#include "buffer_pool.hpp"
+
+#include <thread>
+#include <iostream>
+
+
+constexpr size_t FRAMES_COUNT = 10000;
+constexpr size_t BUFFER_POOL_SIZE = 10;
+constexpr auto TIMEOUT = std::chrono::milliseconds(1000);
+
+using namespace hailort;
+
+// Loads the HEF at hef_path, requests the async stream API for every stream, configures
+// the HEF on the device, and returns the single resulting network group.
+// Returns HAILO_INTERNAL_FAILURE if the HEF yields more than one network group.
+Expected<std::shared_ptr<ConfiguredNetworkGroup>> configure_network_group(Device &device, const std::string &hef_path)
+{
+ auto hef = Hef::create(hef_path);
+ if (!hef) {
+ return make_unexpected(hef.status());
+ }
+
+ auto configure_params = device.create_configure_params(hef.value());
+ if (!configure_params) {
+ return make_unexpected(configure_params.status());
+ }
+
+ // change stream_params here: mark every input/output stream of every network group as async
+ for (auto &ng_name_params_pair : *configure_params) {
+ for (auto &stream_params_name_pair : ng_name_params_pair.second.stream_params_by_name) {
+ stream_params_name_pair.second.flags = HAILO_STREAM_FLAGS_ASYNC;
+ }
+ }
+
+ auto network_groups = device.configure(hef.value(), configure_params.value());
+ if (!network_groups) {
+ return make_unexpected(network_groups.status());
+ }
+
+ // This example only supports a single-network-group HEF
+ if (1 != network_groups->size()) {
+ std::cerr << "Invalid amount of network groups" << std::endl;
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+
+ return std::move(network_groups->at(0));
+}
+
+// Issues frames_to_read asynchronous reads on the output stream. Each read borrows a
+// buffer from buffer_pool; the completion callback returns it to the pool.
+// On any failure 'status' holds the failing status and the loop stops early.
+void read_all(OutputStream &output, BufferPoolPtr buffer_pool, size_t frames_to_read, hailo_status &status)
+{
+ for (size_t i = 0; i < frames_to_read; i++) {
+ // Wait until the stream can accept another async transfer of one full frame
+ status = output.wait_for_ready(output.get_frame_size(), TIMEOUT);
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+ // The callback captures buffer_pool by value (shared_ptr copy), keeping the
+ // pool alive until the transfer completes and the buffer is recycled
+ status = output.read_async(buffer_pool->dequeue(),
+ [buffer_pool](std::shared_ptr<DmaMappedBuffer> buffer, const hailo_async_transfer_completion_info_t &, void *) {
+ buffer_pool->enqueue(buffer);
+ });
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+ }
+}
+
+// Issues frames_to_write asynchronous writes on the input stream. Each write borrows a
+// buffer from buffer_pool; the completion callback returns it to the pool.
+// On any failure 'status' holds the failing status and the loop stops early.
+void write_all(InputStream &input, BufferPoolPtr buffer_pool, size_t frames_to_write, hailo_status &status)
+{
+ for (size_t i = 0; i < frames_to_write; i++) {
+ // Wait until the stream can accept another async transfer of one full frame
+ status = input.wait_for_ready(input.get_frame_size(), TIMEOUT);
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+ // The callback captures buffer_pool by value (shared_ptr copy), keeping the
+ // pool alive until the transfer completes and the buffer is recycled
+ status = input.write_async(buffer_pool->dequeue(),
+ [buffer_pool](std::shared_ptr<DmaMappedBuffer> buffer, const hailo_async_transfer_completion_info_t &, void *) {
+ buffer_pool->enqueue(buffer);
+ });
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+ }
+}
+
+// Demonstrates raw async stream usage: one writer thread feeds the input stream and one
+// reader thread drains the output stream, each recycling DMA-mapped buffers through a pool.
+int main()
+{
+ auto device = Device::create();
+ if (!device) {
+ std::cerr << "Failed to create device " << device.status() << std::endl;
+ return device.status();
+ }
+
+ static const auto HEF_FILE = "hefs/shortcut_net.hef";
+ auto network_group = configure_network_group(*device.value(), HEF_FILE);
+ if (!network_group) {
+ std::cerr << "Failed to configure network group " << HEF_FILE << std::endl;
+ return network_group.status();
+ }
+
+ auto activated_network_group = network_group.value()->activate();
+ if (!activated_network_group) {
+ std::cerr << "Failed to activate network group " << activated_network_group.status() << std::endl;
+ return activated_network_group.status();
+ }
+
+ // Assume one input and output
+ auto output = network_group->get()->get_output_streams()[0];
+ auto input = network_group->get()->get_input_streams()[0];
+
+ // Create BOTH buffer pools before spawning any thread: if the second pool creation fails
+ // after a thread was already started, returning would destroy a joinable std::thread,
+ // which calls std::terminate().
+ auto output_buffer_pool = BufferPool::create(BUFFER_POOL_SIZE, output.get().get_frame_size(), HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H, *device.value());
+ if (!output_buffer_pool) {
+ std::cerr << "Failed to create output buffer pool" << std::endl;
+ return output_buffer_pool.status();
+ }
+
+ auto input_buffer_pool = BufferPool::create(BUFFER_POOL_SIZE, input.get().get_frame_size(), HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D, *device.value());
+ if (!input_buffer_pool) {
+ std::cerr << "Failed to create input buffer pool" << std::endl;
+ return input_buffer_pool.status();
+ }
+
+ // Each thread reports its result through the referenced status variable
+ hailo_status output_status = HAILO_UNINITIALIZED;
+ auto output_thread = std::make_unique<std::thread>(read_all, output, output_buffer_pool.value(), FRAMES_COUNT, std::ref(output_status));
+
+ hailo_status input_status = HAILO_UNINITIALIZED;
+ auto input_thread = std::make_unique<std::thread>(write_all, input, input_buffer_pool.value(), FRAMES_COUNT, std::ref(input_status));
+
+ // Join threads
+ input_thread->join();
+ output_thread->join();
+ if (HAILO_SUCCESS != input_status) {
+ return input_status;
+ }
+ if (HAILO_SUCCESS != output_status) {
+ return output_status;
+ }
+
+ // The read/write threads have completed but the transfers issued by them haven't necessarily completed.
+ // We'll wait for the output buffer queue to fill back up, since the callback we registered enqueues buffers
+ // back to the pool + we issued the same number of reads as writes
+ output_buffer_pool.value()->wait_for_pending_buffers();
+
+ return HAILO_SUCCESS;
+}
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_raw_streams_example raw_streams_example.cpp)
target_link_libraries(cpp_raw_streams_example PRIVATE HailoRT::libhailort Threads::Threads)
#include "hailo/hailort.hpp"
#include <iostream>
+#include <thread>
+
#define HEF_FILE ("hefs/shortcut_net.hef")
constexpr size_t FRAMES_COUNT = 100;
return make_unexpected(hef.status());
}
- auto stream_interface = device.get_default_streams_interface();
- if (!stream_interface) {
- return make_unexpected(stream_interface.status());
- }
-
- auto configure_params = hef->create_configure_params(stream_interface.value());
+ auto configure_params = device.create_configure_params(hef.value());
if (!configure_params) {
return make_unexpected(configure_params.status());
}
int main()
{
- /*
- For simplicity, not passing `device_id` - This function will fail in case more than one device is present.
- See `hailort::Device::scan_devices` and `hailort::Device::create` functions documentation.
- */
- auto device = Device::create();
+ auto device_ids = Device::scan();
+ if (!device_ids) {
+ std::cerr << "Failed to scan, status = " << device_ids.status() << std::endl;
+ return device_ids.status();
+ }
+ if (device_ids->size() < 1){
+ std::cerr << "Failed to find a connected hailo device." << std::endl;
+ return HAILO_INVALID_OPERATION;
+ }
+ auto device = Device::create(device_ids->at(0));
if (!device) {
- std::cerr << "Failed create device " << device.status() << std::endl;
+ std::cerr << "Failed to create device " << device.status() << std::endl;
return device.status();
}
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_switch_network_groups_example switch_network_groups_example.cpp)
target_link_libraries(cpp_switch_network_groups_example PRIVATE HailoRT::libhailort Threads::Threads)
#include <iostream>
#include <chrono>
+#include <thread>
+
constexpr bool QUANTIZED = true;
constexpr hailo_format_type_t FORMAT_TYPE = HAILO_FORMAT_TYPE_AUTO;
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_switch_network_groups_manually_example switch_network_groups_manually_example.cpp)
target_link_libraries(cpp_switch_network_groups_manually_example PRIVATE HailoRT::libhailort Threads::Threads)
#include <iostream>
#include <chrono>
+#include <thread>
+
constexpr bool QUANTIZED = true;
constexpr hailo_format_type_t FORMAT_TYPE = HAILO_FORMAT_TYPE_AUTO;
find_package(Threads REQUIRED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
-find_package(HailoRT 4.12.1 EXACT REQUIRED)
+find_package(HailoRT 4.13.0 EXACT REQUIRED)
add_executable(cpp_vstreams_example vstreams_example.cpp)
target_link_libraries(cpp_vstreams_example PRIVATE HailoRT::libhailort Threads::Threads)
#include "hailo/hailort.hpp"
#include <iostream>
+#include <thread>
+
#define HEF_FILE ("hefs/shortcut_net.hef")
constexpr size_t FRAMES_COUNT = 100;
return make_unexpected(hef.status());
}
- auto configure_params = hef->create_configure_params(HAILO_STREAM_INTERFACE_PCIE);
+ auto configure_params = vdevice.create_configure_params(hef.value());
if (!configure_params) {
return make_unexpected(configure_params.status());
}
KO_RUN_ASAP = 12;
HAILO_NET_FLOW = 13;
HAILO_NET_FLOW_YOLO_NMS = 14;
+ HAILO_NET_FLOW_YOLOX_NMS = 15;
+ HAILO_NET_FLOW_SSD_NMS = 16;
+ HAILO_NET_FLOW_IOU_NMS = 17;
UNUSED = 0XFFFF;
}
PROTO__HW_ARCH__SAGE_A0 = 100;
PROTO__HW_ARCH__SAGE_B0 = 101;
PROTO__HW_ARCH__PAPRIKA_B0 = 102;
- PROTO__HW_ARCH__MERCURY = 103;
+ PROTO__HW_ARCH__HAILO15H = 103;
PROTO__HW_ARCH__GINGER = 104;
PROTO__HW_ARCH__LAVENDER = 105;
}
repeated ProtoHEFYoloBboxDecoder bbox_decoders = 4;
};
+message ProtoHEFSSDBboxDecoder {
+ // List of Height coordinates (given as fraction of input size), defining each box dimensions around the anchor coordinates
+ repeated float h = 1;
+
+ // List of Width coordinates (given as fraction of input size), defining each box dimensions around the anchor coordinates
+ repeated float w = 2;
+
+ // Index of the pad connected to the encoded layer in the decoder (reg layer)
+ uint32 reg_pad_index = 3;
+
+ // Index of the pad connected to the classes scores layer in the decoder (cls layer)
+ uint32 cls_pad_index = 4;
+};
+
+message ProtoHEFSSDNmsOp {
+ // Input image dimensions
+ double image_height = 1;
+ double image_width = 2;
+
+ // Values used for compensation of rescales done in the training phase (derived from faster_rcnn architecture). This param rescales anchors centers.
+ uint32 centers_scale_factor = 3;
+
+ // Values used for compensation of rescales done in the training phase (derived from faster_rcnn architecture). This param rescales anchors dimensions.
+ uint32 bbox_dimensions_scale_factor = 4;
+
+ // Regression layer input order into bbox decoder
+ uint32 ty = 5;
+ uint32 tx = 6;
+ uint32 th = 7;
+ uint32 tw = 8;
+
+ // List of bbox decoders (anchors) for the NMS layer. Each model has its own number of boxes per anchor
+ repeated ProtoHEFSSDBboxDecoder bbox_decoders = 9;
+};
+
+message ProtoHEFIOUNmsOp {};
+
message ProtoHEFNmsOp {
// NMS score threshold
double nms_score_th = 1;
// Additional information needed for specific NMS types
oneof nms_op {
- ProtoHEFYoloNmsOp yolo_nms_op = 7;
+ ProtoHEFYoloNmsOp yolo_nms_op = 7; // YOLOv5 post process
+ ProtoHEFYoloNmsOp yolox_nms_op = 8; // YOLO-X post process (ignores bbox decoder coordinations)
+ ProtoHEFSSDNmsOp ssd_nms_op = 9; // SSD post process
+ ProtoHEFIOUNmsOp iou_op = 10; // IoU only
}
};
// Pad's unique index
uint32 index = 1;
- // Pad's name, can be empty of meaningful
+ // Pad's name, can be empty or meaningful
string name = 2;
// Additional information describing the data going through this pad's interface
// Op type for a subgraph that is running on Hailo's core
ProtoHEFCoreOp core_op = 4;
- // Op type for NMS post-processing
+ // Op type for NMS post-process
ProtoHEFNmsOp nms_op = 5;
}
};
#include <string>
#include <cassert>
+
namespace hailort
{
// Note: If this->size() is less than sizeof(T), then part of the data pointed to by the returned pointer
// will be outside of the buffer's bounds.
template<typename T, std::enable_if_t<std::is_pod<T>::value, int> = 0>
- T* as_pointer()
+ T* as_pointer() const
{
assert(m_size >= sizeof(T));
return reinterpret_cast<T*>(m_data.get());
#include <memory>
#include <chrono>
+
namespace hailort
{
enum class Type {
PCIE = 0,
ETH,
- CORE
+ INTEGRATED
};
/**
*/
static Expected<Type> get_device_type(const std::string &device_id);
+ /**
+ * Create the default configure params from an hef.
+ *
+ * @param[in] hef A reference to an Hef object to create configure params by
+ * @return Upon success, returns Expected of a NetworkGroupsParamsMap (map of string and ConfiguredNetworkParams).
+ * Otherwise, returns Unexpected of ::hailo_status error.
+ */
+ Expected<NetworkGroupsParamsMap> create_configure_params(Hef &hef) const;
+
+ /**
+ * Create the default configure params from an hef.
+ *
+ * @param[in] hef A reference to an Hef object to create configure params by
+ * @param[in] network_group_name Name of network_group to make configure params for.
+ * @return Upon success, returns Expected of a NetworkGroupsParamsMap (map of string and ConfiguredNetworkParams).
+ * Otherwise, returns Unexpected of ::hailo_status error.
+ */
+ Expected<ConfigureNetworkParams> create_configure_params(Hef &hef, const std::string &network_group_name) const;
+
/**
* Configure the device from an hef.
*
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file dma_mapped_buffer.hpp
+ * @brief The mapped buffer that is continuous in virtual memory, but not on physical memory.
+ * We map the buffer to the IOMMU.
+ *
+ * The buffer can be used only with the help of a descriptors list that contains pointers to a physical
+ * continuous "dma pages".
+ *
+ * There are 2 options to allocate the buffer:
+ * 1. User mode allocation - the user mode calls `malloc` or `mmap` to allocate the buffer, then
+ * using HailoRTDriver we map the buffer to the IOMMU (and pin the pages to avoid paging).
+ * This is the default option.
+ * 2. Kernel mode allocation - on some systems, the user mode doesn't allocate the memory in a "dma-able" address,
+ * so we need to allocate the pages in the driver.
+ **/
+
+#ifndef _HAILO_DMA_MAPPED_BUFFER_HPP_
+#define _HAILO_DMA_MAPPED_BUFFER_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/device.hpp"
+
+
+namespace hailort {
+
+// Forward declaration across namespaces
+namespace vdma {
+ class DescriptorList;
+ class MappedBufferFactory;
+ class BufferedChannel;
+}
+
+// ******************************************** NOTE ******************************************** //
+// Async Stream API and DmaMappedBuffer are currently not supported and are for internal use only //
+// ********************************************************************************************** //
+class HAILORTAPI DmaMappedBuffer final
+{
+public:
+ // Allocates 'size' bytes and maps them for DMA with the given direction flags on 'device'.
+ static Expected<DmaMappedBuffer> create(size_t size,
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device);
+ // Maps an existing user allocation instead of allocating a new one.
+ // TODO: doc that the addr needs to be on a new page and aligned to 64B (HRT-9559)
+ // probably best just to call mmap
+ static Expected<DmaMappedBuffer> create_from_user_address(void *user_address, size_t size,
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device);
+
+ // Move-construct only: copying and assignment (including move-assignment) are deleted.
+ DmaMappedBuffer(const DmaMappedBuffer &other) = delete;
+ DmaMappedBuffer &operator=(const DmaMappedBuffer &other) = delete;
+ DmaMappedBuffer(DmaMappedBuffer &&other) noexcept;
+ DmaMappedBuffer &operator=(DmaMappedBuffer &&other) = delete;
+ ~DmaMappedBuffer();
+
+ // Host-visible address of the mapped memory.
+ void *user_address();
+ // Size of the mapping in bytes.
+ size_t size() const;
+ // NOTE(review): presumably synchronizes host/device views of the buffer - confirm semantics with the implementation.
+ hailo_status synchronize();
+
+private:
+ static Expected<DmaMappedBuffer> create(void *user_address, size_t size,
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device);
+
+ // Need access to pimpl
+ friend class vdma::DescriptorList;
+ friend class vdma::MappedBufferFactory;
+ friend class vdma::BufferedChannel;
+
+ class Impl;
+ explicit DmaMappedBuffer(std::unique_ptr<Impl> pimpl);
+ std::unique_ptr<Impl> pimpl;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_DMA_MAPPED_BUFFER_HPP_ */
\ No newline at end of file
#include <memory>
#include <vector>
-#include <array>
#include <chrono>
#if defined(__GNUC__)
#include <poll.h>
#include <atomic>
#include <mutex>
+
// Forward declare neosmart::neosmart_event_t_
namespace neosmart {
struct neosmart_event_t_;
#define _HAILO_EXPECTED_HPP_
#include "hailo/hailort.h"
+
#include <assert.h>
#include <utility>
#include <type_traits>
+
namespace hailort
{
return value();
}
+ const T& operator*() const&
+ {
+ assert(has_value());
+ return value();
+ }
+
/**
* Checks whether the object contains a value.
*/
extern "C" {
#endif
+#include "platform.h"
+
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <limits.h>
-#include "platform.h"
/** @defgroup group_defines HailoRT API definitions
* @{
#define HAILO_UNIQUE_VDEVICE_GROUP_ID ("UNIQUE")
#define HAILO_DEFAULT_VDEVICE_GROUP_ID HAILO_UNIQUE_VDEVICE_GROUP_ID
+#define HAILO_SCHEDULER_PRIORITY_NORMAL (16)
+#define HAILO_SCHEDULER_PRIORITY_MAX (31)
+#define HAILO_SCHEDULER_PRIORITY_MIN (0)
+
typedef float float32_t;
typedef double float64_t;
typedef uint16_t nms_bbox_counter_t;
HAILO_STATUS__X(33, HAILO_ATR_TABLES_CONF_VALIDATION_FAIL /*!< Validating address translation tables failure, for FW control use */)\
HAILO_STATUS__X(34, HAILO_CONTROL_EVENT_CREATE_FAIL /*!< Creating control event failure */)\
HAILO_STATUS__X(35, HAILO_READ_EVENT_FAIL /*!< Reading event failure */)\
- HAILO_STATUS__X(36, HAILO_PCIE_DRIVER_FAIL /*!< PCIE driver failure */)\
+ HAILO_STATUS__X(36, HAILO_DRIVER_FAIL /*!< Driver failure */)\
HAILO_STATUS__X(37, HAILO_INVALID_FIRMWARE_MAGIC /*!< Invalid FW magic */)\
HAILO_STATUS__X(38, HAILO_INVALID_FIRMWARE_CODE_SIZE /*!< Invalid FW code size */)\
HAILO_STATUS__X(39, HAILO_INVALID_KEY_CERTIFICATE_SIZE /*!< Invalid key certificate size */)\
typedef enum {
HAILO_DEVICE_TYPE_PCIE,
HAILO_DEVICE_TYPE_ETH,
- HAILO_DEVICE_TYPE_CORE,
+ HAILO_DEVICE_TYPE_INTEGRATED,
/** Max enum value to maintain ABI Integrity */
HAILO_DEVICE_TYPE_MAX_ENUM = HAILO_MAX_ENUM
HAILO_ARCH_HAILO8_A0 = 0,
HAILO_ARCH_HAILO8,
HAILO_ARCH_HAILO8L,
- HAILO_ARCH_MERCURY_CA,
- HAILO_ARCH_MERCURY_VPU,
+ HAILO_ARCH_HAILO15,
/** Max enum value to maintain ABI Integrity */
HAILO_ARCH_MAX_ENUM = HAILO_MAX_ENUM
*/
HAILO_FORMAT_ORDER_RGB4 = 17,
+ /**
+ * YUV format, encoding 8 pixels in 96 bits
+ * [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, U1, V0, V1] represents
+     * [Y0, U0, V0], [Y1, U0, V0], [Y2, U0, V0], [Y3, U0, V0], [Y4, U1, V1], [Y5, U1, V1], [Y6, U1, V1], [Y7, U1, V1]
+ */
+ HAILO_FORMAT_ORDER_I420 = 18,
+
+ /**
+ * Internal implementation for HAILO_FORMAT_ORDER_I420 format
+ * [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, U1, V0, V1] is represented by [Y0, Y1, Y2, Y3, U0, V0, Y4, Y5, Y6, Y7, U1, V1]
+ */
+ HAILO_FORMAT_ORDER_HAILO_YYYYUV = 19,
+
/** Max enum value to maintain ABI Integrity */
HAILO_FORMAT_ORDER_MAX_ENUM = HAILO_MAX_ENUM
} hailo_format_order_t;
HAILO_H2D_STREAM = 0,
HAILO_D2H_STREAM = 1,
+ /** Max enum value to maintain ABI Integrity */
HAILO_STREAM_DIRECTION_MAX_ENUM = HAILO_MAX_ENUM
} hailo_stream_direction_t;
+// ******************************************** NOTE ******************************************** //
+// Async Stream API and DmaMappedBuffer are currently not supported and are for internal use only //
+// ********************************************************************************************** //
+/** Stream flags */
+typedef enum {
+    HAILO_STREAM_FLAGS_NONE = 0, /*!< No flags */
+    HAILO_STREAM_FLAGS_ASYNC = 1 << 0, /*!< Async stream */
+
+    /** Max enum value to maintain ABI Integrity */
+    HAILO_STREAM_FLAGS_MAX_ENUM = HAILO_MAX_ENUM
+} hailo_stream_flags_t;
+
+/** Hailo vdma buffer direction */
+typedef enum {
+    HAILO_VDMA_BUFFER_DIRECTION_FLAGS_NONE = 0, /*!< No direction */
+    HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D = 1 << 0, /*!< Host to device */
+    HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H = 1 << 1, /*!< Device to host */
+
+    /** Max enum value to maintain ABI Integrity */
+    HAILO_VDMA_BUFFER_DIRECTION_FLAGS_MAX_ENUM = HAILO_MAX_ENUM
+} hailo_vdma_buffer_direction_flags_t;
+
/** Input or output data transform parameters */
typedef struct {
hailo_stream_transform_mode_t transform_mode;
/** Core input stream (host to device) parameters */
typedef struct {
EMPTY_STRUCT_PLACEHOLDER
-} hailo_core_input_stream_params_t;
+} hailo_integrated_input_stream_params_t;
/** Core output stream (device to host) parameters */
typedef struct {
EMPTY_STRUCT_PLACEHOLDER
-} hailo_core_output_stream_params_t;
+} hailo_integrated_output_stream_params_t;
typedef enum {
HAILO_STREAM_INTERFACE_PCIE = 0,
HAILO_STREAM_INTERFACE_ETH,
HAILO_STREAM_INTERFACE_MIPI,
- HAILO_STREAM_INTERFACE_CORE,
+ HAILO_STREAM_INTERFACE_INTEGRATED,
/** Max enum value to maintain ABI Integrity */
HAILO_STREAM_INTERFACE_MAX_ENUM = HAILO_MAX_ENUM
typedef struct {
hailo_stream_interface_t stream_interface;
hailo_stream_direction_t direction;
+ hailo_stream_flags_t flags;
union {
hailo_pcie_input_stream_params_t pcie_input_params;
- hailo_core_input_stream_params_t core_input_params;
+ hailo_integrated_input_stream_params_t integrated_input_params;
hailo_eth_input_stream_params_t eth_input_params;
hailo_mipi_input_stream_params_t mipi_input_params;
hailo_pcie_output_stream_params_t pcie_output_params;
- hailo_core_output_stream_params_t core_output_params;
+ hailo_integrated_output_stream_params_t integrated_output_params;
hailo_eth_output_stream_params_t eth_output_params;
};
} hailo_stream_parameters_t;
} hailo_bbox_float32_t;
#pragma pack(pop)
+/** Completion info of an asynchronous stream transfer. */
+typedef struct {
+    /**
+     * - HAILO_SUCCESS when transfer is complete
+     * - HAILO_STREAM_NOT_ACTIVATED due to stream deactivation
+     */
+    hailo_status status;
+} hailo_async_transfer_completion_info_t;
+
/**
* Input or output stream information. In case of multiple inputs or outputs, each one has
* its own stream.
HAILORTAPI hailo_status hailo_set_scheduler_threshold(hailo_configured_network_group configured_network_group,
uint32_t threshold, const char *network_name);
+/**
+ * Sets the scheduler priority of the network.
+ * When the scheduler selects the next network to run, networks with a higher priority are preferred.
+ * A bigger number represents a higher priority.
+ *
+ * @param[in]     configured_network_group     NetworkGroup for which to set the scheduler priority.
+ * @param[in]     priority                     Priority as a number between HAILO_SCHEDULER_PRIORITY_MIN - HAILO_SCHEDULER_PRIORITY_MAX.
+ * @param[in]     network_name                 Network name for which to set the priority.
+ *                                             If NULL is passed, the priority will be set for all the networks in the network group.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note Using this function is only allowed when scheduling_algorithm is not ::HAILO_SCHEDULING_ALGORITHM_NONE.
+ * @note The default priority is HAILO_SCHEDULER_PRIORITY_NORMAL.
+ * @note Currently, setting the priority for a specific network is not supported.
+ */
+HAILORTAPI hailo_status hailo_set_scheduler_priority(hailo_configured_network_group configured_network_group,
+    uint8_t priority, const char *network_name);
+
/** @} */ // end of group_network_group_functions
/** @defgroup group_stream_functions Stream functions
#define _HAILORT_HPP_
#include "hailo/hailort.h"
+#include "hailo/hailort_common.hpp"
#include "hailo/hef.hpp"
#include "hailo/device.hpp"
#include "hailo/vdevice.hpp"
#include "hailo/expected.hpp"
#include "hailo/buffer.hpp"
#include "hailo/event.hpp"
-#include "hailo/hailort_common.hpp"
#include "hailo/runtime_statistics.hpp"
#include "hailo/network_rate_calculator.hpp"
#include "hailo/quantization.hpp"
+#include "hailo/dma_mapped_buffer.hpp"
+#include "hailo/hailort_defaults.hpp"
#endif /* _HAILORT_HPP_ */
#include "hailo/hailort.h"
#include "hailo/expected.hpp"
+
#include <cmath>
#include <chrono>
#include <string>
#include <vector>
+
namespace hailort
{
}
}
+    /**
+     * Gets a string representation of the given device architecture.
+     *
+     * @param[in] arch     A ::hailo_device_architecture_t object.
+     * @return The string representation of the device architecture.
+     */
+    static std::string get_device_arch_str(const hailo_device_architecture_t &arch)
+    {
+        switch (arch)
+        {
+        case HAILO_ARCH_HAILO8_A0:
+            return "HAILO8_A0";
+        case HAILO_ARCH_HAILO8:
+            return "HAILO8";
+        case HAILO_ARCH_HAILO8L:
+            return "HAILO8L";
+        case HAILO_ARCH_HAILO15:
+            return "HAILO15";
+        default:
+            return "UNKNOWN ARCHITECTURE";
+        }
+    }
+
/**
* Gets a string reprenestation of the given format order.
*
return "YYVU";
case HAILO_FORMAT_ORDER_RGB4:
return "RGB4";
+ case HAILO_FORMAT_ORDER_I420:
+ return "I420";
+ case HAILO_FORMAT_ORDER_HAILO_YYYYUV:
+ return "YYYYUV";
default:
return "Nan";
}
static constexpr bool is_vdma_stream_interface(hailo_stream_interface_t stream_interface)
{
- return (HAILO_STREAM_INTERFACE_PCIE == stream_interface) || (HAILO_STREAM_INTERFACE_CORE == stream_interface);
+ return (HAILO_STREAM_INTERFACE_PCIE == stream_interface) || (HAILO_STREAM_INTERFACE_INTEGRATED == stream_interface);
}
static Expected<hailo_device_id_t> to_device_id(const std::string &device_id);
return a;
}
+// Returns true iff bit i (0-based, counting from the LSB) of num is set.
+inline bool is_bit_set(uint32_t num, uint8_t i)
+{
+    return (1 == ((num >> i) & 1));
+}
+
} /* namespace hailort */
#endif /* _HAILO_HAILORT_COMMON_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_defaults.hpp
+ * @brief Factories for HailoRT default parameters, formats and configuration structs.
+ **/
+
+#ifndef _HAILO_HAILORT_DEFAULTS_HPP_
+#define _HAILO_HAILORT_DEFAULTS_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/network_group.hpp"
+
+
+namespace hailort
+{
+
+// Separator used when composing a network name from a network group name
+// (see get_network_name) -- presumably "<group>/<network>"; verify against callers.
+#define HAILO_DEFAULT_NETWORK_NAME_QUALIFIER (std::string("/"))
+
+
+/*! Static-only factory of HailoRT default parameter structs; not instantiable (ctor deleted). */
+class HAILORTAPI HailoRTDefaults final
+{
+public:
+    HailoRTDefaults() = delete;
+
+    static Expected<hailo_format_order_t> get_device_format_order(uint32_t compiler_format_order);
+    static hailo_format_order_t get_default_host_format_order(const hailo_format_t &device_format);
+
+    static hailo_format_t expand_auto_format(const hailo_format_t &host_format, const hailo_format_t &hw_format);
+    static hailo_format_t get_user_buffer_format();
+    static hailo_format_t get_user_buffer_format(bool quantized, hailo_format_type_t format_type);
+
+    static hailo_transform_params_t get_transform_params(bool quantized, hailo_format_type_t format_type);
+    static hailo_transform_params_t get_transform_params(const hailo_stream_info_t &stream_info);
+    static hailo_transform_params_t get_transform_params();
+
+    static hailo_vstream_params_t get_vstreams_params();
+    static hailo_vstream_params_t get_vstreams_params(bool quantized, hailo_format_type_t format_type);
+
+    static Expected<hailo_stream_parameters_t> get_stream_parameters(hailo_stream_interface_t interface,
+        hailo_stream_direction_t direction);
+
+    static ConfigureNetworkParams get_configure_params(uint16_t batch_size = HAILO_DEFAULT_BATCH_SIZE,
+        hailo_power_mode_t power_mode = HAILO_POWER_MODE_PERFORMANCE);
+    static hailo_network_parameters_t get_network_parameters(uint16_t batch_size = HAILO_DEFAULT_BATCH_SIZE);
+    static std::string get_network_name(const std::string &net_group_name);
+    static hailo_activate_network_group_params_t get_active_network_group_params();
+
+    static hailo_vdevice_params_t get_vdevice_params();
+
+private:
+    static struct sockaddr_in get_sockaddr();
+    static hailo_eth_input_stream_params_t get_eth_input_stream_params();
+    static hailo_eth_output_stream_params_t get_eth_output_stream_params();
+    static hailo_pcie_input_stream_params_t get_pcie_input_stream_params();
+    static hailo_pcie_output_stream_params_t get_pcie_output_stream_params();
+    static hailo_integrated_input_stream_params_t get_integrated_input_stream_params();
+    static hailo_integrated_output_stream_params_t get_integrated_output_stream_params();
+    static hailo_mipi_input_stream_params_t get_mipi_input_stream_params();
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_HAILORT_DEFAULTS_HPP_ */
#include <memory>
#include <map>
+
namespace hailort
{
*/
Expected<float64_t> get_bottleneck_fps(const std::string &net_group_name="");
+    /**
+     * Gets the device architecture the HEF was compiled for.
+     *
+     * @return Upon success, returns Expected containing the device architecture the HEF was compiled for.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    Expected<hailo_device_architecture_t> get_hef_device_arch();
+
+    /**
+     * Gets a string representation of the given device architecture.
+     *
+     * @param[in] arch     A ::hailo_device_architecture_t representing the device architecture of the HEF.
+     * @return Upon success, returns a string representing the device architecture the HEF was compiled for.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    static Expected<std::string> device_arch_to_string(const hailo_device_architecture_t arch);
+
/**
* Gets all stream names under the given vstream name
*
* the function returns the output virtual stream params of the given network.
* If NULL is passed, the function returns the output virtual stream params of
* all the networks of the first network group.
- * @param[in] quantized Whether the data fed into the chip is already quantized. True means
+ * @param[in] quantized Whether the data returned from the chip is already quantized. True means
* the data is already quantized. False means it's HailoRT's responsibility
* to quantize (scale) the data.
* @param[in] format_type The default format type for all output virtual streams.
*/
std::string hash() const;
+ Expected<std::string> get_hef_description(bool stream_infos, bool vstream_infos);
+
~Hef();
Hef(Hef &&);
Hef &operator=(Hef &&);
friend class OutputStream;
friend class PyhailortInternal;
friend class ConfiguredNetworkGroupBase;
+ friend class CoreOp;
friend class VDeviceBase;
#ifdef HAILO_SUPPORT_MULTI_PROCESS
#include "hailo/vstream.hpp"
-#include <unordered_map>
-#include <chrono>
namespace hailort
{
m_inputs(std::move(other.m_inputs)),
m_outputs(std::move(other.m_outputs)),
m_is_multi_context(std::move(other.m_is_multi_context)),
+ m_is_scheduled(std::move(other.m_is_scheduled)),
m_network_name_to_input_count(std::move(other.m_network_name_to_input_count)),
m_network_name_to_output_count(std::move(other.m_network_name_to_output_count)),
m_batch_size(std::move(other.m_batch_size))
{};
private:
- InferVStreams(std::vector<InputVStream> &&inputs, std::vector<OutputVStream> &&outputs, bool is_multi_context, uint16_t batch_size);
+ InferVStreams(std::vector<InputVStream> &&inputs, std::vector<OutputVStream> &&outputs, bool is_multi_context,
+ bool is_scheduled, uint16_t batch_size);
hailo_status verify_network_inputs_and_outputs(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
const std::map<std::string, MemoryView>& outputs_name_mem_view_map);
hailo_status verify_memory_view_size(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
std::vector<InputVStream> m_inputs;
std::vector<OutputVStream> m_outputs;
- bool m_is_multi_context;
+ const bool m_is_multi_context;
+ const bool m_is_scheduled;
std::map<std::string, size_t> m_network_name_to_input_count;
std::map<std::string, size_t> m_network_name_to_output_count;
uint16_t m_batch_size;
#include <string>
#include <map>
#include <unordered_map>
-#include <thread>
-#include <utility>
+
namespace hailort
{
virtual const std::string &get_network_group_name() const = 0;
virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) = 0;
-
+
virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) = 0;
/**
/**
* Creates output virtual stream params.
*
- * @param[in] quantized Whether the data fed into the chip is already quantized. True means
+ * @param[in] quantized Whether the data returned from the chip is already quantized. True means
* the data is already quantized. False means it's HailoRT's responsibility
* to quantize (scale) the data.
* @param[in] format_type The default format type for all output virtual streams.
/**
* Creates output virtual stream params. The groups are splitted with respect to their low-level streams.
*
- * @param[in] quantized Whether the data fed into the chip is already quantized. True means
+ * @param[in] quantized Whether the data returned from the chip is already quantized. True means
* the data is already quantized. False means it's HailoRT's responsibility
* to quantize (scale) the data.
* @param[in] format_type The default format type for all output virtual streams.
*/
virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const = 0;
+ /**
+ * @returns whether the network group is managed by the model scheduler.
+ */
+ virtual bool is_scheduled() const = 0;
+
/**
* Sets the maximum time period that may pass before getting run time from the scheduler,
* even without reaching the minimum required send requests (e.g. threshold - see set_scheduler_threshold()),
*/
virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name="") = 0;
+    /**
+     * Sets the scheduler priority of the network.
+     * When the scheduler selects the next network to run, networks with a higher priority are preferred.
+     * A bigger number represents a higher priority.
+     *
+     * @param[in] priority              Priority as a number between HAILO_SCHEDULER_PRIORITY_MIN - HAILO_SCHEDULER_PRIORITY_MAX.
+     * @param[in] network_name          Network name for which to set the priority.
+     *                                  If not passed, the priority will be set for all the networks in the network group.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note Using this function is only allowed when scheduling_algorithm is not ::HAILO_SCHEDULING_ALGORITHM_NONE.
+     * @note The default priority is HAILO_SCHEDULER_PRIORITY_NORMAL.
+     * @note Currently, setting the priority for a specific network is not supported.
+     */
+    virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name="") = 0;
+
/**
* @return Is the network group multi-context or not.
*/
virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params) = 0;
virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params) = 0;
+ virtual hailo_status before_fork() { return HAILO_SUCCESS; }
+ virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; }
+ virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; }
+
protected:
ConfiguredNetworkGroup() = default;
#include "hailo/hailort.h"
#include "hailo/hailort_common.hpp"
+
#include <math.h>
#include <fenv.h>
+
namespace hailort
{
return (T)((number - quant_info.qp_zp) * quant_info.qp_scale);
}
-private:
- template <typename T, typename Q>
- static inline Q quantize_input(T number, hailo_quant_info_t quant_info)
- {
- float32_t clipped_number = clip((float32_t)number, quant_info.limvals_min, quant_info.limvals_max);
- return (Q)rintf((clipped_number / quant_info.qp_scale) + quant_info.qp_zp);
- }
-
static inline float32_t clip(float32_t n, float32_t limval_min, float32_t limval_max)
{
if (n >= limval_max) {
return n;
}
}
+
+private:
+ template <typename T, typename Q>
+ static inline Q quantize_input(T number, hailo_quant_info_t quant_info)
+ {
+ float32_t clipped_number = clip((float32_t)number, quant_info.limvals_min, quant_info.limvals_max);
+ return (Q)rintf((clipped_number / quant_info.qp_scale) + quant_info.qp_zp);
+ }
};
} /* namespace hailort */
#include <type_traits>
#include <memory>
+
namespace hailort
{
* Add a new measurement to the Accumulator, updating the statistics measured.
*
* @param data The measurement to be added.
+ * @param samples_count The weight of the measurement to be considered in average calculations.
* @note Implementations of this interface are to update the statistics in constant time
*/
- virtual void add_data_point(T data) = 0;
+ virtual void add_data_point(T data, uint32_t samples_count = 1) = 0;
/**
* Gets the current statistics of the data added to the Accumulator, clearing the statistics afterwards.
#include "hailo/event.hpp"
#include <memory>
-#include <map>
#include <chrono>
#include <atomic>
+#include <functional>
+
namespace hailort
{
// Forward declaration
struct LayerInfo;
+class DmaMappedBuffer;
+
+using TransferDoneCallback = std::function<void(std::shared_ptr<DmaMappedBuffer> buffer,
+ const hailo_async_transfer_completion_info_t &status,
+ void *opaque)>;
+
/*! Input (host to device) stream representation */
class HAILORTAPI InputStream
/**
* @returns a pointer for network group activated event.
*/
- virtual EventPtr &get_network_group_activated_event() = 0;
+ EventPtr &get_network_group_activated_event()
+ DEPRECATED("'InputStream::get_network_group_activated_event' is deprecated.");
/**
- * @returns whether the stream is managed by a network group scheduler.
+ * @returns whether the stream is managed by the model scheduler.
*/
virtual bool is_scheduled() = 0;
*/
virtual hailo_status write(const MemoryView &buffer);
+ // ******************************************** NOTE ******************************************** //
+ // Async Stream API and DmaMappedBuffer are currently not supported and are for internal use only //
+ // ********************************************************************************************** //
+ virtual hailo_status wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout); // Internal use only
+ virtual hailo_status write_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback,
+ void *opaque = nullptr); // Internal use only
+
/**
* @returns A ::hailo_stream_info_t object containing the stream's info.
*/
*/
virtual std::string to_string() const;
+ // get_network_group_activated_event is same as this function
+ virtual EventPtr &get_core_op_activated_event() = 0;
protected:
InputStream() = default;
- InputStream(InputStream &&) = default;
+ InputStream(InputStream &&) = delete;
// Note: Implement sync_write_all_raw_buffer_no_transform_impl for the actual stream interaction in sub classes
virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) = 0;
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) = 0;
+ virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
virtual hailo_status deactivate_stream() = 0;
virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) = 0;
private:
friend class HefConfigurator;
friend class ConfiguredNetworkGroupBase;
+ friend class CoreOp;
};
/*! Output (device to host) stream representation */
/**
* @returns a pointer for network group activated event.
*/
- virtual EventPtr &get_network_group_activated_event() = 0;
+ EventPtr &get_network_group_activated_event()
+ DEPRECATED("'OutputStream::get_network_group_activated_event' is deprecated.");
/**
- * @returns whether the stream is managed by a network group scheduler.
+ * @returns whether the stream is managed by the model scheduler.
*/
virtual bool is_scheduled() = 0;
-
+
/**
* @returns the stream's info.
*/
*/
virtual hailo_status read(MemoryView buffer);
+ // ******************************************** NOTE ******************************************** //
+ // Async Stream API and DmaMappedBuffer are currently not supported and are for internal use only //
+ // ********************************************************************************************** //
+ virtual hailo_status wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout); // Internal use only
+ virtual hailo_status read_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback,
+ void *opaque = nullptr); // Internal use only
+
+ // get_network_group_activated_event is same as this function
+ virtual EventPtr &get_core_op_activated_event() = 0;
protected:
OutputStream() = default;
- OutputStream(OutputStream&&);
+ OutputStream(OutputStream&&) = delete;
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) = 0;
+ virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
virtual hailo_status deactivate_stream() = 0;
virtual hailo_status read_all(MemoryView &buffer) = 0;
friend class ConfiguredNetworkGroupBase;
friend class HwReadElement;
friend class OutputDemuxer;
+ friend class CoreOp;
};
} /* namespace hailort */
#include <map>
#include <vector>
+
namespace hailort
{
#include "hailo/network_group.hpp"
#include "hailo/device.hpp"
+
namespace hailort
{
-#define HAILO_ENABLE_MULTI_DEVICE_SCHEDULER "HAILO_ENABLE_MULTI_DEVICE_SCHEDULER"
-
/*! Represents a bundle of physical devices. */
class HAILORTAPI VDevice
{
*/
virtual Expected<hailo_stream_interface_t> get_default_streams_interface() const = 0;
+ /**
+ * Create the default configure params from an hef.
+ *
+ * @param[in] hef A reference to an Hef object to create configure params by
+ * @return Upon success, returns Expected of a NetworkGroupsParamsMap (map of string and ConfiguredNetworkParams).
+ * Otherwise, returns Unexpected of ::hailo_status error.
+ */
+ Expected<NetworkGroupsParamsMap> create_configure_params(Hef &hef) const;
+
+ /**
+ * Create the default configure params from an hef.
+ *
+ * @param[in] hef A reference to an Hef object to create configure params by
+ * @param[in] network_group_name Name of network_group to make configure params for.
+ * @return Upon success, returns Expected of a NetworkGroupsParamsMap (map of string and ConfiguredNetworkParams).
+ * Otherwise, returns Unexpected of ::hailo_status error.
+ */
+ Expected<ConfigureNetworkParams> create_configure_params(Hef &hef, const std::string &network_group_name) const;
+
+ virtual hailo_status before_fork() { return HAILO_SUCCESS; }
+ virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; }
+ virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; }
+
virtual ~VDevice() = default;
VDevice(const VDevice &) = delete;
VDevice &operator=(const VDevice &) = delete;
#ifndef _HAILO_VSTREAM_HPP_
#define _HAILO_VSTREAM_HPP_
-#include "hailo/transform.hpp"
-#include "hailo/stream.hpp"
#include "hailo/network_group.hpp"
#include "hailo/runtime_statistics.hpp"
static Expected<InputVStream> create(const hailo_vstream_info_t &vstream_info,
const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
AccumulatorPtr pipeline_latency_accumulator);
InputVStream(InputVStream &&other) noexcept = default;
InputVStream &operator=(InputVStream &&other) noexcept = default;
*/
const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
+ hailo_status before_fork();
+ hailo_status after_fork_in_parent();
+ hailo_status after_fork_in_child();
+
protected:
explicit InputVStream(std::shared_ptr<InputVStreamInternal> vstream);
std::string get_pipeline_description() const;
const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator);
+ EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
OutputVStream(OutputVStream &&other) noexcept = default;
OutputVStream &operator=(OutputVStream &&other) noexcept = default;
virtual ~OutputVStream() = default;
*/
const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
+ hailo_status before_fork();
+ hailo_status after_fork_in_parent();
+ hailo_status after_fork_in_child();
+
protected:
explicit OutputVStream(std::shared_ptr<OutputVStreamInternal> vstream);
std::string get_pipeline_description() const;
std::shared_ptr<OutputVStreamInternal> m_vstream;
friend class VStreamsBuilderUtils;
- friend class VDeviceNetworkGroup;
+ friend class VDeviceCoreOp;
};
/*! Contains the virtual streams creation functions */
message ProtoMonInfo {
string network_name = 1;
double fps = 2;
- double active_time = 3;
+ double utilization = 3;
}
enum ProtoMonStreamDirection {
PROTO__STREAM_DIRECTION__DEVICE_TO_HOST = 1;
}
+message ProtoMonDeviceInfo {
+ string device_id = 1;
+ double utilization = 2;
+ string device_arch = 3;
+}
+
message ProtoMonStreamFramesInfo {
string stream_name = 1;
ProtoMonStreamDirection stream_direction = 2;
string pid = 1;
repeated ProtoMonInfo networks_infos = 2;
repeated ProtoMonNetworkFrames net_frames_infos = 3;
+ repeated ProtoMonDeviceInfo device_infos = 4;
}
\ No newline at end of file
SET(${output} "${listVar}" PARENT_SCOPE)
ENDFUNCTION(relative_to_absolute_paths)
-add_subdirectory(os)
-add_subdirectory(net_flow)
-
set(HAILORT_CPP_SOURCES
- device.cpp
- device_internal.cpp
- control.cpp
- stream.cpp
- stream_internal.cpp
- transform.cpp
- buffer.cpp
- network_rate_calculator.cpp
- hailort_logger.cpp
hailort.cpp
- hailort_common.cpp
- sensor_config_utils.cpp
- pipeline.cpp
- pipeline_multiplexer.cpp
-
- eth_device.cpp
- eth_stream.cpp
- udp.cpp
-
- hef.cpp
- network_group_metadata.cpp
-
- context_switch/context_switch_actions.cpp
- context_switch/hcp_config_network_group.cpp
- context_switch/hcp_config_activated_network_group.cpp
- context_switch/vdma_config_network_group.cpp
- context_switch/vdevice_network_group.cpp
- context_switch/vdma_config_activated_network_group.cpp
- context_switch/network_group.cpp
- context_switch/resource_manager.cpp
- context_switch/resource_manager_builder.cpp
- context_switch/context_switch_buffer_builder.cpp
-
- channel_allocator.cpp
- inter_context_buffer.cpp
- ddr_channels_pair.cpp
- config_buffer.cpp
- d2h_events_parser.cpp
- mipi_stream.cpp
-
- vdma_channel.cpp
- vdma_descriptor_list.cpp
- vdma_device.cpp
- vdma_stream.cpp
-
- vdma/vdma_mapped_buffer_impl.cpp
- vdma/mapped_buffer.cpp
- vdma/sg_buffer.cpp
- vdma/continuous_buffer.cpp
- vdma/vdma_buffer.cpp
-
- pcie_device.cpp
- pcie_stream.cpp
-
- core_device.cpp
- core_stream.cpp
-
- vdevice.cpp
- vdevice_stream.cpp
- vdevice_stream_multiplexer_wrapper.cpp
- multi_device_scheduled_stream.cpp
-
- control_protocol.cpp
+ hailort_defaults.cpp
+)
- vstream.cpp
- inference_pipeline.cpp
+add_subdirectory(utils)
+add_subdirectory(os)
+add_subdirectory(device_common)
+add_subdirectory(vdevice)
+add_subdirectory(transform)
+add_subdirectory(stream_common)
+add_subdirectory(eth)
+add_subdirectory(vdma)
+add_subdirectory(mipi)
+add_subdirectory(hef)
+add_subdirectory(network_group)
+add_subdirectory(core_op)
+add_subdirectory(net_flow)
- network_group_scheduler.cpp
- scheduled_network_group.cpp
- scheduler_oracle.cpp
-)
+set(HAILORT_CPP_SOURCES "${HAILORT_CPP_SOURCES}" "${HAILORT_OPS_CPP_SOURCES}")
if(HAILO_BUILD_SERVICE)
- set(HAILORT_CPP_SOURCES "${HAILORT_CPP_SOURCES}" hailort_rpc_client.cpp network_group_client.cpp)
-endif()
-if(HAILO_BUILD_PROFILER)
- set(HAILORT_CPP_SOURCES "${HAILORT_CPP_SOURCES}" tracer.cpp)
+ add_subdirectory(service)
endif()
-
set(common_dir "${PROJECT_SOURCE_DIR}/common/src")
set(COMMON_C_SOURCES
${common_dir}/firmware_status.c
set(HAILORT_CPP_OS_SOURCES ${HAILORT_CPP_OS_SOURCES} CACHE INTERNAL "Absolute paths of os-related source files")
set(COMMON_C_SOURCES ${COMMON_C_SOURCES} CACHE INTERNAL "Absolute paths of common source files")
set(HAILORT_SRCS_ABS ${HAILORT_CPP_SOURCES} ${HAILORT_CPP_OS_SOURCES} ${HAILORT_COMMON_CPP_SOURCES} ${COMMON_C_SOURCES} CACHE INTERNAL "All absolute paths of hailort's source files")
+set(HAILORT_OPS_CPP_SOURCES ${HAILORT_OPS_CPP_SOURCES} PARENT_SCOPE)
SET_SOURCE_FILES_PROPERTIES(${C_SOURCES} PROPERTIES LANGUAGE CXX)
add_library(libhailort SHARED ${HAILORT_SRCS_ABS})
${HAILORT_INC_DIR}/hailo/hailort.h
${HAILORT_INC_DIR}/hailo/platform.h
+ ${HAILORT_INC_DIR}/hailo/hailort.hpp
${HAILORT_INC_DIR}/hailo/buffer.hpp
${HAILORT_INC_DIR}/hailo/device.hpp
${HAILORT_INC_DIR}/hailo/event.hpp
${HAILORT_INC_DIR}/hailo/expected.hpp
${HAILORT_INC_DIR}/hailo/hailort_common.hpp
- ${HAILORT_INC_DIR}/hailo/hailort.hpp
${HAILORT_INC_DIR}/hailo/hef.hpp
${HAILORT_INC_DIR}/hailo/network_group.hpp
${HAILORT_INC_DIR}/hailo/stream.hpp
${HAILORT_INC_DIR}/hailo/network_rate_calculator.hpp
${HAILORT_INC_DIR}/hailo/vdevice.hpp
${HAILORT_INC_DIR}/hailo/quantization.hpp
+ ${HAILORT_INC_DIR}/hailo/dma_mapped_buffer.hpp
+ ${HAILORT_INC_DIR}/hailo/hailort_defaults.hpp
)
set_target_properties(libhailort PROPERTIES
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file buffer.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "hailo/buffer.hpp"
-#include "common/logger_macros.hpp"
-#include "common/utils.hpp"
-#include "common/string_utils.hpp"
-
-#include <algorithm>
-#include <string>
-#include <cstring>
-#include <iostream>
-#include <iomanip>
-
-namespace hailort
-{
-
-static void format_buffer(std::ostream& stream, const uint8_t *buffer, size_t size)
-{
- assert(nullptr != buffer);
-
- static const bool UPPERCASE = true;
- static const size_t BYTES_PER_LINE = 32;
- static const char *BYTE_DELIM = " ";
- for (size_t offset = 0; offset < size; offset += BYTES_PER_LINE) {
- const size_t line_size = std::min(BYTES_PER_LINE, size - offset);
- stream << fmt::format("0x{:08X}", offset) << BYTE_DELIM; // 32 bit offset into a buffer should be enough
- stream << StringUtils::to_hex_string(buffer + offset, line_size, UPPERCASE, BYTE_DELIM) << std::endl;
- }
- stream << "[size = " << std::dec << size << "]";
-}
-
-Buffer::Buffer() :
- m_data(nullptr),
- m_size(0)
-{}
-
-Buffer::Buffer(Buffer&& other) :
- m_data(std::move(other.m_data)),
- m_size(std::exchange(other.m_size, 0))
-{}
-
-Expected<Buffer> Buffer::create(size_t size)
-{
- std::unique_ptr<uint8_t[]> data(new (std::nothrow) uint8_t[size]);
- if (data == nullptr) {
- LOGGER__ERROR("Failed allocating {} bytes", size);
- return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
- }
-
- return Buffer(std::move(data), size);
-}
-
-Expected<Buffer> Buffer::create(size_t size, uint8_t default_value)
-{
- auto buffer = create(size);
- CHECK_EXPECTED(buffer);
- std::memset(static_cast<void*>(buffer->m_data.get()), default_value, size);
- return buffer;
-}
-
-Expected<BufferPtr> Buffer::create_shared(size_t size)
-{
- auto buffer = Buffer::create(size);
- CHECK_EXPECTED(buffer);
- auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
- return buffer_ptr;
-}
-
-Expected<BufferPtr> Buffer::create_shared(size_t size, uint8_t default_value)
-{
- auto buffer = Buffer::create(size, default_value);
- CHECK_EXPECTED(buffer);
- auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
- return buffer_ptr;
-}
-
-Expected<Buffer> Buffer::create(const uint8_t *src, size_t size)
-{
- auto buffer = create(size);
- CHECK_EXPECTED(buffer);
- std::memcpy(static_cast<void*>(buffer->m_data.get()), static_cast<const void*>(src), size);
- return buffer;
-}
-
-Expected<Buffer> Buffer::create(std::initializer_list<uint8_t> init)
-{
- auto buffer = create(init.size());
- CHECK_EXPECTED(buffer);
- size_t index = 0;
- for (const auto& n : init) {
- // Hackzzz
- buffer->m_data[index++] = n;
- }
-
- return buffer;
-}
-
-Expected<Buffer> Buffer::copy() const
-{
- return Buffer::create(m_data.get(), m_size);
-}
-
-Buffer& Buffer::operator=(Buffer&& other)
-{
- m_data = std::move(other.m_data);
- m_size = std::exchange(other.m_size, 0);
- return *this;
-}
-
-bool Buffer::operator==(const Buffer& rhs) const
-{
- if (m_size != rhs.m_size) {
- return false;
- }
- return (0 == std::memcmp(data(), rhs.data(), m_size));
-}
-
-bool Buffer::operator!=(const Buffer& rhs) const
-{
- if (m_size != rhs.m_size) {
- return true;
- }
- return (0 != std::memcmp(data(), rhs.data(), m_size));
-}
-
-uint8_t& Buffer::operator[](size_t pos)
-{
- assert(pos < m_size);
- return m_data[pos];
-}
-
-const uint8_t& Buffer::operator[](size_t pos) const
-{
- assert(pos < m_size);
- return m_data[pos];
-}
-
-Buffer::iterator Buffer::begin()
-{
- return iterator(data());
-}
-
-Buffer::iterator Buffer::end()
-{
- return iterator(data() + m_size);
-}
-
-uint8_t* Buffer::data() noexcept
-{
- return m_data.get();
-}
-
-const uint8_t* Buffer::data() const noexcept
-{
- return m_data.get();
-}
-
-size_t Buffer::size() const noexcept
-{
- return m_size;
-}
-
-uint8_t* Buffer::release() noexcept
-{
- m_size = 0;
- return m_data.release();
-}
-
-std::string Buffer::to_string() const
-{
- for (size_t i = 0; i < m_size; i++) {
- if (m_data[i] == 0) {
- // We'll return a string that ends at the first null in the buffer
- return std::string(reinterpret_cast<const char*>(m_data.get()));
- }
- }
-
- return std::string(reinterpret_cast<const char*>(m_data.get()), m_size);
-}
-
-// Note: This is a friend function
-std::ostream& operator<<(std::ostream& stream, const Buffer& buffer)
-{
- format_buffer(stream, buffer.data(), buffer.size());
- return stream;
-}
-
-uint16_t Buffer::as_uint16() const
-{
- return as_type<uint16_t>();
-}
-
-uint32_t Buffer::as_uint32() const
-{
- return as_type<uint32_t>();
-}
-
-uint64_t Buffer::as_uint64() const
-{
- return as_type<uint64_t>();
-}
-
-uint16_t& Buffer::as_uint16()
-{
- return as_type<uint16_t>();
-}
-
-uint32_t& Buffer::as_uint32()
-{
- return as_type<uint32_t>();
-}
-
-uint64_t& Buffer::as_uint64()
-{
- return as_type<uint64_t>();
-}
-
-Buffer::Buffer(std::unique_ptr<uint8_t[]> data, size_t size) :
- m_data(std::move(data)),
- m_size(size)
- {}
-
-MemoryView::MemoryView() :
- m_data(nullptr),
- m_size(0)
-{}
-
-MemoryView::MemoryView(Buffer &buffer) :
- m_data(buffer.data()),
- m_size(buffer.size())
-{}
-
-MemoryView::MemoryView(void *data, size_t size) :
- m_data(data),
- m_size(size)
-{}
-
-const MemoryView MemoryView::create_const(const void *data, size_t size)
-{
- return std::move(MemoryView(const_cast<void *>(data), size));
-}
-
-uint8_t* MemoryView::data() noexcept
-{
- return reinterpret_cast<uint8_t*>(m_data);
-}
-
-const uint8_t* MemoryView::data() const noexcept
-{
- return reinterpret_cast<const uint8_t*>(m_data);
-}
-
-size_t MemoryView::size() const noexcept
-{
- return m_size;
-}
-
-bool MemoryView::empty() const noexcept
-{
- return (m_data == nullptr);
-}
-
-// Note: This is a friend function
-std::ostream& operator<<(std::ostream& stream, const MemoryView& buffer)
-{
- format_buffer(stream, buffer.data(), buffer.size());
- return stream;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file channel_allocator.cpp
- * @brief Allocates vdma channel indexes, allows reusing non-boundary channels between contextes.
- **/
-
-#include "channel_allocator.hpp"
-
-
-namespace hailort
-{
-
-ChannelAllocator::ChannelAllocator(size_t max_engines_count) :
- m_max_engines_count(max_engines_count)
-{}
-
-Expected<vdma::ChannelId> ChannelAllocator::get_available_channel_id(const LayerIdentifier &layer_identifier,
- VdmaChannel::Direction direction, uint8_t engine_index)
-{
- CHECK_AS_EXPECTED(engine_index < m_max_engines_count, HAILO_INVALID_ARGUMENT,
- "Invalid engine index {}, max is {}", engine_index, m_max_engines_count);
-
- const auto found_channel = m_allocated_channels.find(layer_identifier);
- if (found_channel != m_allocated_channels.end()) {
- CHECK_AS_EXPECTED(found_channel->second.engine_index == engine_index, HAILO_INTERNAL_FAILURE,
- "Mismatch engine index");
- return Expected<vdma::ChannelId>(found_channel->second);
- }
-
- // If we reach here, we need to allocate channel index for that layer.
- std::set<vdma::ChannelId> currently_used_channel_indexes;
- for (auto channel_id_pair : m_allocated_channels) {
- currently_used_channel_indexes.insert(channel_id_pair.second);
- }
-
- uint8_t min_channel_index =
- (direction == VdmaChannel::Direction::H2D) ? MIN_H2D_CHANNEL_INDEX : MIN_D2H_CHANNEL_INDEX;
- uint8_t max_channel_index =
- (direction == VdmaChannel::Direction::H2D) ? MAX_H2D_CHANNEL_INDEX : MAX_D2H_CHANNEL_INDEX;
-
- for (uint8_t index = min_channel_index; index <= max_channel_index; ++index) {
- const vdma::ChannelId channel_id = {engine_index, index};
-
- // Check that the channel is not currently in use.
- if (contains(currently_used_channel_indexes, channel_id)) {
- continue;
- }
-
- // In the case of boundary channels, if the channel id was used in previous context as an internal channel (and
- // it was freed, so it doesn't appear in `currently_used_channel_index`), we can't reuse it.
- if (std::get<0>(layer_identifier) == LayerType::BOUNDARY) {
- if (contains(m_internal_channel_ids, channel_id)) {
- continue;
- }
- }
-
- // Found it
- insert_new_channel_id(layer_identifier, channel_id);
- return Expected<vdma::ChannelId>(channel_id);
- }
-
- LOGGER__ERROR("Failed to get available channel_index");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-hailo_status ChannelAllocator::free_channel_index(const LayerIdentifier &layer_identifier)
-{
- auto layer_channel_pair = m_allocated_channels.find(layer_identifier);
- CHECK(m_allocated_channels.end() != layer_channel_pair, HAILO_INTERNAL_FAILURE, "Failed to free channel");
- CHECK(std::get<0>(layer_channel_pair->first) != LayerType::BOUNDARY, HAILO_INTERNAL_FAILURE,
- "Can't free boundary channels");
-
- m_allocated_channels.erase(layer_channel_pair);
- return HAILO_SUCCESS;
-}
-
-const std::set<vdma::ChannelId> &ChannelAllocator::get_internal_channel_ids() const
-{
- return m_internal_channel_ids;
-}
-
-void ChannelAllocator::insert_new_channel_id(const LayerIdentifier &layer_identifier, const vdma::ChannelId &channel_id)
-{
- if (LayerType::BOUNDARY == std::get<0>(layer_identifier)) {
- m_boundary_channel_ids.insert(channel_id);
- } else {
- m_internal_channel_ids.insert(channel_id);
- }
-
- m_allocated_channels.emplace(layer_identifier, channel_id);
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file channel_allocator.hpp
- * @brief Allocates vdma channel indexes, allows reusing non-boundary channels between contextes.
- **/
-
-#ifndef _HAILO_CHANNEL_ALLOCATOR_HPP_
-#define _HAILO_CHANNEL_ALLOCATOR_HPP_
-
-#include "hailo/hailort.h"
-#include "vdma_descriptor_list.hpp"
-#include "vdma/channel_id.hpp"
-#include "vdma_channel.hpp"
-#include "layer_info.hpp"
-#include <array>
-
-namespace hailort
-{
-
-class ChannelAllocator final
-{
-public:
- explicit ChannelAllocator(size_t max_engines_count);
- ChannelAllocator(ChannelAllocator &&other) = default;
-
- Expected<vdma::ChannelId> get_available_channel_id(const LayerIdentifier &layer_identifier,
- VdmaChannel::Direction direction, uint8_t engine_index);
- hailo_status free_channel_index(const LayerIdentifier &layer_identifier);
-
- const std::set<vdma::ChannelId> &get_internal_channel_ids() const;
-
-private:
- void insert_new_channel_id(const LayerIdentifier &layer_identifier, const vdma::ChannelId &channel_id);
-
- const size_t m_max_engines_count;
-
- // Contains all channels that are currently used. This channels are released in the free_channel_index.
- std::map<LayerIdentifier, vdma::ChannelId> m_allocated_channels;
-
- // Contains all channels id allocated for the network group. This channels are never released.
- std::set<vdma::ChannelId> m_boundary_channel_ids;
- std::set<vdma::ChannelId> m_internal_channel_ids;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CHANNEL_ALLOCATOR_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file config_buffer.cpp
- * @brief Manages configuration vdma buffer. The configuration buffer contains nn-configurations in a specific
- * hw format (ccw).
- */
-
-#include "config_buffer.hpp"
-#include "vdma/sg_buffer.hpp"
-#include "vdma/continuous_buffer.hpp"
-
-#include <numeric>
-
-namespace hailort {
-
-Expected<ConfigBuffer> ConfigBuffer::create(HailoRTDriver &driver, vdma::ChannelId channel_id,
- const std::vector<uint32_t> &cfg_sizes)
-{
- const auto buffer_size = std::accumulate(cfg_sizes.begin(), cfg_sizes.end(), 0);
-
- auto buffer_ptr = should_use_ccb(driver) ?
- create_ccb_buffer(driver, buffer_size) :
- create_sg_buffer(driver, channel_id.channel_index, cfg_sizes);
- CHECK_EXPECTED(buffer_ptr);
-
- return ConfigBuffer(buffer_ptr.release(), channel_id, buffer_size);
-}
-
-ConfigBuffer::ConfigBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer,
- vdma::ChannelId channel_id, size_t total_buffer_size)
- : m_buffer(std::move(buffer)),
- m_channel_id(channel_id),
- m_total_buffer_size(total_buffer_size), m_acc_buffer_offset(0), m_acc_desc_count(0),
- m_current_buffer_size(0)
-{}
-
-Expected<uint32_t> ConfigBuffer::program_descriptors()
-{
- auto descriptors_count =
- m_buffer->program_descriptors(m_acc_buffer_offset, VdmaInterruptsDomain::NONE, VdmaInterruptsDomain::DEVICE,
- m_acc_desc_count, false);
- CHECK_EXPECTED(descriptors_count);
-
- m_acc_desc_count += descriptors_count.value();
- m_acc_buffer_offset = 0;
-
- return descriptors_count;
-}
-
-hailo_status ConfigBuffer::pad_with_nops()
-{
- static constexpr uint64_t CCW_NOP = 0x0;
-
- auto page_size = desc_page_size();
- auto buffer_size = m_total_buffer_size;
- auto buffer_residue = buffer_size % page_size;
- if (0 != buffer_residue % CCW_HEADER_SIZE) {
- LOGGER__ERROR("CFG channel buffer size must be a multiple of CCW header size ({})", CCW_HEADER_SIZE);
- return HAILO_INTERNAL_FAILURE;
- }
- /* If buffer does not fit info descriptor, the host must pad the buffer with CCW NOPs. */
- auto nop_count = (buffer_residue == 0) ? 0 : ((page_size - buffer_residue) / CCW_HEADER_SIZE);
- for (uint8_t nop_index = 0; nop_index < nop_count; nop_index++) {
- /* Generate nop transaction.
- CCW of all zeros (64'h0) should be treated as NOP - ignore CCW and expect CCW in next 64b word.
- When CSM recognize it is a NOP it pops it from the channel FIFO without forward any address/data/command,
- does not contribute to CRC calculations but return credits to the peripheral as usual. */
- write_inner(MemoryView::create_const(reinterpret_cast<const void *>(&CCW_NOP), sizeof(CCW_NOP)));
- }
- return HAILO_SUCCESS;
-}
-
-
-hailo_status ConfigBuffer::write(const MemoryView &data)
-{
- CHECK(data.size() <= size_left(), HAILO_INTERNAL_FAILURE, "Write too many config words");
- write_inner(data);
- m_current_buffer_size += data.size();
- return HAILO_SUCCESS;
-}
-
-size_t ConfigBuffer::size_left() const
-{
- assert(m_total_buffer_size >= m_current_buffer_size);
- return m_total_buffer_size - m_current_buffer_size;
-}
-
-size_t ConfigBuffer::get_current_buffer_size() const
-{
- return m_current_buffer_size;
-}
-
-uint16_t ConfigBuffer::desc_page_size() const
-{
- return m_buffer->desc_page_size();
-}
-
-vdma::ChannelId ConfigBuffer::channel_id() const
-{
- return m_channel_id;
-}
-
-CONTROL_PROTOCOL__host_buffer_info_t ConfigBuffer::get_host_buffer_info() const
-{
- return m_buffer->get_host_buffer_info(m_acc_desc_count * m_buffer->desc_page_size());
-}
-
-hailo_status ConfigBuffer::write_inner(const MemoryView &data)
-{
- size_t total_offset = (m_acc_desc_count * m_buffer->desc_page_size()) + m_acc_buffer_offset;
- auto status = m_buffer->write(data.data(), data.size(), total_offset);
- CHECK_SUCCESS(status);
-
- m_acc_buffer_offset += data.size();
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<vdma::VdmaBuffer>> ConfigBuffer::create_sg_buffer(HailoRTDriver &driver,
- uint8_t vdma_channel_index, const std::vector<uint32_t> &cfg_sizes)
-{
- auto desc_sizes_pair = VdmaDescriptorList::get_desc_buffer_sizes_for_multiple_transfers(driver, 1, cfg_sizes);
- CHECK_EXPECTED(desc_sizes_pair);
-
- auto page_size = desc_sizes_pair->first;
- auto descs_count = desc_sizes_pair->second;
-
- auto buffer = vdma::SgBuffer::create(driver, descs_count, page_size, HailoRTDriver::DmaDirection::H2D,
- vdma_channel_index);
- CHECK_EXPECTED(buffer);
-
- auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
-}
-
-Expected<std::unique_ptr<vdma::VdmaBuffer>> ConfigBuffer::create_ccb_buffer(HailoRTDriver &driver,
- uint32_t buffer_size)
-{
- buffer_size = vdma::ContinuousBuffer::get_buffer_size(buffer_size);
- auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);
- CHECK_EXPECTED(buffer);
-
- auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
-}
-
-bool ConfigBuffer::should_use_ccb(HailoRTDriver &driver)
-{
- switch (driver.dma_type()) {
- case HailoRTDriver::DmaType::PCIE:
- return false;
- case HailoRTDriver::DmaType::DRAM:
- return true;
- }
-
- // Shouldn't reach here
- assert(false);
- return false;
-}
-
-} /* hailort */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file config_buffer.hpp
- * @brief Manages configuration vdma buffer. The configuration buffer contains nn-configurations in a specific
- * hw format (ccw).
- */
-
-#ifndef _HAILO_CONFIG_BUFFER_HPP_
-#define _HAILO_CONFIG_BUFFER_HPP_
-
-#include "vdma/vdma_buffer.hpp"
-#include "hailo/buffer.hpp"
-
-namespace hailort {
-
-#define CCW_BYTES_IN_WORD (4)
-#define CCW_DATA_OFFSET (CCW_BYTES_IN_WORD * 2)
-#define CCW_HEADER_SIZE (CCW_DATA_OFFSET)
-
-
-class ConfigBuffer final
-{
-public:
- static Expected<ConfigBuffer> create(HailoRTDriver &driver, vdma::ChannelId channel_id,
- const std::vector<uint32_t> &cfg_sizes);
-
- // Write data to config channel
- hailo_status write(const MemoryView &data);
-
- // Program the descriptors for the data written so far
- Expected<uint32_t> program_descriptors();
-
- // On prefetch mode, we need to pad the config buffer with nops BEFORE the last write.
- hailo_status pad_with_nops();
-
- // Amount of bytes left to write into the buffer.
- size_t size_left() const;
-
- // Amount of bytes already written.
- size_t get_current_buffer_size() const;
-
- uint16_t desc_page_size() const;
- vdma::ChannelId channel_id() const;
- CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;
-
-private:
- ConfigBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, vdma::ChannelId channel_id, size_t total_buffer_size);
-
- hailo_status write_inner(const MemoryView &data);
-
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
- uint8_t vdma_channel_index, const std::vector<uint32_t> &cfg_sizes);
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,
- uint32_t buffer_size);
-
- static bool should_use_ccb(HailoRTDriver &driver);
-
- std::unique_ptr<vdma::VdmaBuffer> m_buffer;
- vdma::ChannelId m_channel_id;
- const size_t m_total_buffer_size;
- size_t m_acc_buffer_offset;
- uint32_t m_acc_desc_count;
- size_t m_current_buffer_size;
-};
-
-} /* hailort */
-
-#endif /* _HAILO_CONFIG_BUFFER_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file active_network_group_holder.hpp
- * @brief place_holder stored in ConfigManager indicating which ConfiguredNetworkGroup is currently active
- *
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_ACTIVE_NETWORK_GROUP_HOLDER_HPP_
-#define _HAILO_CONTEXT_SWITCH_ACTIVE_NETWORK_GROUP_HOLDER_HPP_
-
-// TODO: cant we just have ActiveNetworkGroup ref under device?
-
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-
-namespace hailort
-{
-
-template <typename T>
-class ActiveNetworkGroupHolder final
-{
- public:
- ActiveNetworkGroupHolder() : m_net_group(nullptr) {}
-
- ExpectedRef<T> get()
- {
- CHECK_NOT_NULL_AS_EXPECTED(m_net_group, HAILO_INVALID_OPERATION);
- return std::ref(*m_net_group);
- }
- void set(T &net_group)
- {
- assert(!is_any_active());
- m_net_group = &net_group;
- }
-
- bool is_any_active() { return nullptr != m_net_group; }
-
- void clear() { m_net_group = nullptr; }
-
- ActiveNetworkGroupHolder(ActiveNetworkGroupHolder&) = delete;
- ActiveNetworkGroupHolder& operator=(ActiveNetworkGroupHolder&) = delete;
- ActiveNetworkGroupHolder& operator=(ActiveNetworkGroupHolder&&) = delete;
- ActiveNetworkGroupHolder(ActiveNetworkGroupHolder&&) = default;
- private:
- T *m_net_group;
-};
-
-} /* namespace hailort */
-
-#endif //_HAILO_CONTEXT_SWITCH_ACTIVE_NETWORK_GROUP_HOLDER_HPP_
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file context_switch_actions.cpp
- * @brief Contains classes represents the context switch action (Actions found in the HEFs
- * and action sent to the fw).
- **/
-
-#include "context_switch_actions.hpp"
-#include "context_switch_defs.h"
-#include "context_switch/multi_context/resource_manager.hpp"
-
-namespace hailort
-{
-
-
-static uint8_t pack_vdma_channel_id(const vdma::ChannelId &channel_id)
-{
- return static_cast<uint8_t>(channel_id.channel_index |
- (channel_id.engine_index << CONTEXT_SWITCH_DEFS__PACKED_VDMA_CHANNEL_ID__ENGINE_INDEX_SHIFT));
-}
-
-static uint8_t pack_lcu_id(uint8_t cluster_index, uint8_t lcu_index)
-{
- return static_cast<uint8_t>(lcu_index |
- (cluster_index << CONTEXT_SWITCH_DEFS__PACKED_LCU_ID_CLUSTER_INDEX_SHIFT));
-}
-
-ContextSwitchConfigAction::ContextSwitchConfigAction(Type type) :
- ContextSwitchConfigAction(type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT)
-{}
-
-ContextSwitchConfigAction::ContextSwitchConfigAction(Type type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_list_type) :
- m_type(type),
- m_action_list_type(action_list_type)
-{}
-
-Expected<std::vector<Buffer>> ContextSwitchConfigAction::serialize(const ContextResources &context_resources,
- bool is_repeated /*=false*/) const
-{
- CHECK_AS_EXPECTED(m_action_list_type < CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INTERNAL_FAILURE,
- "Action cannot be serialized");
-
- auto header = serialize_header(is_repeated);
- CHECK_EXPECTED(header);
-
- auto params = serialize_params(context_resources);
- CHECK_EXPECTED(params);
-
- auto serialized_action = Buffer::create(header->size() + params->size());
- CHECK_EXPECTED(serialized_action);
-
- std::copy(header->begin(), header->end(), serialized_action->data());
- std::copy(params->begin(), params->end(), serialized_action->data() + header->size());
-
- std::vector<Buffer> buffers;
- buffers.emplace_back(serialized_action.release());
- return buffers;
-}
-
-ContextSwitchConfigAction::Type ContextSwitchConfigAction::get_type() const
-{
- return m_type;
-}
-
-CONTEXT_SWITCH_DEFS__ACTION_TYPE_t ContextSwitchConfigAction::get_action_list_type() const
-{
- return m_action_list_type;
-}
-
-Expected<Buffer> ContextSwitchConfigAction::serialize_header(bool is_repeated) const
-{
- CHECK_AS_EXPECTED(m_action_list_type != CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INTERNAL_FAILURE,
- "Action cannot be serialized");
-
- auto header = Buffer::create(sizeof(CONTROL_PROTOCOL__ACTION_HEADER_t), 0);
- CHECK_EXPECTED(header);
- header->as_type<CONTROL_PROTOCOL__ACTION_HEADER_t>().action_type = m_action_list_type;
- header->as_type<CONTROL_PROTOCOL__ACTION_HEADER_t>().is_repeated = is_repeated; // TODO: prettier
- return header.release();
-}
-
-Expected<ContextSwitchConfigActionPtr> NoneAction::create()
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) NoneAction());
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-NoneAction::NoneAction() :
- ContextSwitchConfigAction(Type::None)
-{}
-
-Expected<std::vector<Buffer>> NoneAction::serialize(const ContextResources &, bool) const
-{
- // Do nothing
- return std::vector<Buffer>();
-}
-
-bool NoneAction::supports_repeated_block() const
-{
- // None actions are ignored and aren't written to the FW's action list. Hence they can't be part of a repeated block.
- return false;
-}
-
-Expected<Buffer> NoneAction::serialize_params(const ContextResources &) const
-{
- return make_unexpected(HAILO_NOT_IMPLEMENTED);
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateConfigChannelAction::create(uint8_t config_stream_index,
- const vdma::ChannelId &channel_id, const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateConfigChannelAction(config_stream_index,
- channel_id, host_buffer_info));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateConfigChannelAction::ActivateConfigChannelAction(uint8_t config_stream_index,
- const vdma::ChannelId &channel_id, const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
- ContextSwitchConfigAction(Type::ActivateConfigChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CFG_CHANNEL),
- m_config_stream_index(config_stream_index),
- m_channel_id(channel_id),
- m_host_buffer_info(host_buffer_info)
-{}
-
-bool ActivateConfigChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be in repeated block for easier debug.
- return false;
-}
-
-Expected<Buffer> ActivateConfigChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_cfg_channel_t params{};
- params.config_stream_index = m_config_stream_index;
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.host_buffer_info = m_host_buffer_info;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> DeactivateConfigChannelAction::create(uint8_t config_stream_index,
- const vdma::ChannelId &channel_id)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DeactivateConfigChannelAction(config_stream_index,
- channel_id));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-DeactivateConfigChannelAction::DeactivateConfigChannelAction(uint8_t config_stream_index,
- const vdma::ChannelId &channel_id) :
- ContextSwitchConfigAction(Type::DeactivateConfigChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_CFG_CHANNEL),
- m_config_stream_index(config_stream_index),
- m_channel_id(channel_id)
-{}
-
-bool DeactivateConfigChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be in repeated block for easier debug.
- return false;
-}
-
-Expected<Buffer> DeactivateConfigChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__deactivate_cfg_channel_t params{};
- params.config_stream_index = m_config_stream_index;
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> WriteDataCcwAction::create(
- Buffer &&data, uint8_t config_stream_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WriteDataCcwAction(
- std::move(data), config_stream_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WriteDataCcwAction::WriteDataCcwAction(Buffer &&data, uint8_t config_stream_index) :
- ContextSwitchConfigAction(Type::WriteDataCcw),
- m_data(std::move(data)),
- m_config_stream_index(config_stream_index)
-{}
-
-Expected<std::vector<Buffer>> WriteDataCcwAction::serialize(const ContextResources &, bool) const
-{
- // WriteDataCcwActions aren't written to the FW's action list. Hence the execute will do nothing.
- return std::vector<Buffer>();
-}
-
-bool WriteDataCcwAction::supports_repeated_block() const
-{
- // WriteDataCcwActions aren't written to the FW's action list. Hence they can't be part of a repeated block.
- return false;
-}
-
-Expected<Buffer> WriteDataCcwAction::serialize_params(const ContextResources &) const
-{
- return make_unexpected(HAILO_NOT_IMPLEMENTED);
-}
-
-Expected<ContextSwitchConfigActionPtr> AddCcwBurstAction::create(uint8_t config_stream_index, uint16_t ccw_bursts)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) AddCcwBurstAction(config_stream_index, ccw_bursts));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-AddCcwBurstAction::AddCcwBurstAction(uint8_t config_stream_index, uint16_t ccw_bursts) :
- ContextSwitchConfigAction(Type::AddCcwBurst, CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CCW_BURSTS),
- m_config_stream_index(config_stream_index),
- m_ccw_bursts(ccw_bursts)
-{}
-
-Expected<Buffer> AddCcwBurstAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__fetch_ccw_bursts_action_data_t params{};
- params.ccw_bursts = m_ccw_bursts;
- params.config_stream_index = m_config_stream_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-bool AddCcwBurstAction::supports_repeated_block() const
-{
- return false;
-}
-
-Expected<ContextSwitchConfigActionPtr> FetchCfgChannelDescriptorsAction::create(const vdma::ChannelId &channel_id,
- uint16_t desc_count)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) FetchCfgChannelDescriptorsAction(channel_id,
- desc_count));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-FetchCfgChannelDescriptorsAction::FetchCfgChannelDescriptorsAction(const vdma::ChannelId &channel_id, uint16_t desc_count) :
- ContextSwitchConfigAction(Type::FetchCfgChannelDescriptors, CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CFG_CHANNEL_DESCRIPTORS),
- m_channel_id(channel_id),
- m_desc_count(desc_count)
-{}
-
-bool FetchCfgChannelDescriptorsAction::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<Buffer> FetchCfgChannelDescriptorsAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__fetch_cfg_channel_descriptors_action_data_t params{};
- params.descriptors_count = m_desc_count;
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> StartBurstCreditsTaskAction::create()
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) StartBurstCreditsTaskAction());
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-StartBurstCreditsTaskAction::StartBurstCreditsTaskAction() :
- ContextSwitchConfigAction(Type::StartBurstCreditsTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START)
-{}
-
-bool StartBurstCreditsTaskAction::supports_repeated_block() const
-{
- // We don't support repeated blocks for this action, since only one is added per group of consecutive
- // TriggerNewDataFromDataInput actions.
- return false;
-}
-
-Expected<Buffer> StartBurstCreditsTaskAction::serialize_params(const ContextResources &) const
-{
- return Buffer::create(0);
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitForNetworkGroupChangeAction::create()
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForNetworkGroupChangeAction());
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitForNetworkGroupChangeAction::WaitForNetworkGroupChangeAction() :
- ContextSwitchConfigAction(Type::WaitForNetworkGroupChange,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_APPLICATION_CHANGE_INTERRUPT)
-{}
-
-bool WaitForNetworkGroupChangeAction::supports_repeated_block() const
-{
- // Only one network group change action exists.
- return false;
-}
-
-Expected<Buffer> WaitForNetworkGroupChangeAction::serialize_params(const ContextResources &) const
-{
- return Buffer::create(0);
-}
-
-
-Expected<ContextSwitchConfigActionPtr> RepeatedAction::create(
- std::vector<ContextSwitchConfigActionPtr> &&actions)
-{
- CHECK_AS_EXPECTED(!actions.empty(), HAILO_INVALID_HEF, "Invalid sub-action count (must be greater than zero)");
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(actions.size()), HAILO_INTERNAL_FAILURE,
- "Too many repeated actions {}", actions.size());
- CHECK_AS_EXPECTED(actions[0]->supports_repeated_block(), HAILO_INVALID_HEF,
- "Invalid repeated sub-action type (Action does not support repeated)");
- CHECK_AS_EXPECTED(actions[0]->get_action_list_type() != CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INVALID_HEF,
- "Invalid repeated sub-action type (can't have sub-action with type CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT)");
-
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) RepeatedAction(std::move(actions)));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-RepeatedAction::RepeatedAction(std::vector<ContextSwitchConfigActionPtr> &&actions) :
- ContextSwitchConfigAction(Type::AddRepeated, CONTEXT_SWITCH_DEFS__ACTION_TYPE_REPEATED_ACTION),
- m_actions(std::move(actions)),
- m_sub_action_type(m_actions[0]->get_action_list_type())
-{}
-
-bool RepeatedAction::supports_repeated_block() const
-{
- // RepeatedActions can't be part of a repeated block themselves
- return false;
-}
-
-Expected<Buffer> RepeatedAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__repeated_action_header_t params{};
- params.sub_action_type = m_sub_action_type;
- params.last_executed = 0;
- params.count = static_cast<uint8_t>(m_actions.size());
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<std::vector<Buffer>> RepeatedAction::serialize(const ContextResources &context_resources,
- bool is_repeated/*=false*/) const
-{
- CHECK_AS_EXPECTED(!is_repeated, HAILO_INTERNAL_FAILURE, "Cant use recursive repeated");
- std::vector<Buffer> buffers;
- buffers.reserve(m_actions.size() + 1); // Contains the repeated header and all of the actions
-
- auto repeated_header = ContextSwitchConfigAction::serialize(context_resources);
- CHECK_EXPECTED(repeated_header);
- CHECK_AS_EXPECTED(repeated_header->size() == 1, HAILO_INTERNAL_FAILURE, "Repeated action header should contain one buffer");
- buffers.emplace_back(std::move(repeated_header->at(0)));
-
- for (const auto &action : m_actions) {
- assert(action->get_action_list_type() == m_sub_action_type);
- const bool REPEATED = true;
- auto action_buffers = action->serialize(context_resources, REPEATED);
- CHECK_EXPECTED(action_buffers);
- CHECK_AS_EXPECTED(action_buffers->size() == 1, HAILO_INTERNAL_FAILURE, "Sub action should contain one buffer");
- buffers.emplace_back(std::move(action_buffers->at(0)));
- }
-
- return buffers;
-}
-
-Expected<ContextSwitchConfigActionPtr> DisableLcuAction::create(uint8_t cluster_index, uint8_t lcu_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DisableLcuAction(cluster_index, lcu_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-DisableLcuAction::DisableLcuAction(uint8_t cluster_index, uint8_t lcu_index) :
- ContextSwitchConfigAction(Type::DisableLcu, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DISABLE_LCU),
- m_cluster_index(cluster_index),
- m_lcu_index(lcu_index)
-{}
-
-bool DisableLcuAction::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<Buffer> DisableLcuAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__disable_lcu_action_data_t params{};
- params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitForLcuAction::create(uint8_t cluster_index, uint8_t lcu_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForLcuAction(cluster_index, lcu_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitForLcuAction::WaitForLcuAction(uint8_t cluster_index, uint8_t lcu_index) :
- ContextSwitchConfigAction(Type::WaitForLcu, CONTEXT_SWITCH_DEFS__ACTION_TYPE_LCU_INTERRUPT),
- m_cluster_index(cluster_index),
- m_lcu_index(lcu_index)
-{}
-
-bool WaitForLcuAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitForLcuAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__lcu_interrupt_data_t params{};
- params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> EnableLcuAction::create(uint8_t cluster_index, uint8_t lcu_index,
- uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count)
-{
- const auto is_default = (CONTEXT_SWITCH_DEFS__ENABLE_LCU_DEFAULT_KERNEL_ADDRESS == kernel_done_address) &&
- (CONTEXT_SWITCH_DEFS__ENABLE_LCU_DEFAULT_KERNEL_COUNT == kernel_done_count);
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableLcuAction(cluster_index, lcu_index,
- network_index, kernel_done_address, kernel_done_count, is_default));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-CONTEXT_SWITCH_DEFS__ACTION_TYPE_t EnableLcuAction::get_enable_lcu_action_type(bool is_default)
-{
- return is_default ? CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_LCU_DEFAULT :
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_LCU_NON_DEFAULT;
-}
-
-ContextSwitchConfigAction::Type EnableLcuAction::get_enable_lcu_type(bool is_default)
-{
- return is_default ? Type::EnableLcuDefault : Type::EnableLcuNonDefault;
-}
-
-EnableLcuAction::EnableLcuAction(uint8_t cluster_index, uint8_t lcu_index,
- uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count, bool is_default) :
- ContextSwitchConfigAction(get_enable_lcu_type(is_default), get_enable_lcu_action_type(is_default)),
- m_cluster_index(cluster_index),
- m_lcu_index(lcu_index),
- m_network_index(network_index),
- m_kernel_done_address(kernel_done_address),
- m_kernel_done_count(kernel_done_count),
- m_is_default(is_default)
-{}
-
-Expected<Buffer> EnableLcuAction::serialize_params(const ContextResources &) const
-{
- if (m_is_default) {
- CONTEXT_SWITCH_DEFS__enable_lcu_action_default_data_t params{};
- params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
- params.network_index = m_network_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
- }
- else {
- CONTEXT_SWITCH_DEFS__enable_lcu_action_non_default_data_t params{};
- params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
- params.kernel_done_address = m_kernel_done_address;
- params.kernel_done_count = m_kernel_done_count;
- params.network_index = m_network_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
- }
-}
-
-bool EnableLcuAction::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<ContextSwitchConfigActionPtr> EnableSequencerAction::create(uint8_t cluster_index,
- uint8_t initial_l3_cut, uint16_t initial_l3_offset, uint32_t active_apu, uint32_t active_ia,
- uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0, uint64_t l2_offset_1)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableSequencerAction(cluster_index, initial_l3_cut,
- initial_l3_offset, active_apu, active_ia, active_sc, active_l2, l2_offset_0, l2_offset_1));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-EnableSequencerAction::EnableSequencerAction(uint8_t cluster_index, uint8_t initial_l3_cut, uint16_t initial_l3_offset,
- uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0,
- uint64_t l2_offset_1) :
- ContextSwitchConfigAction(Type::TriggerSequencer, CONTEXT_SWITCH_DEFS__ACTION_TYPE_TRIGGER_SEQUENCER),
- m_cluster_index(cluster_index),
- m_initial_l3_cut(initial_l3_cut),
- m_initial_l3_offset(initial_l3_offset),
- m_active_apu(active_apu),
- m_active_ia(active_ia),
- m_active_sc(active_sc),
- m_active_l2(active_l2),
- m_l2_offset_0(l2_offset_0),
- m_l2_offset_1(l2_offset_1)
-{}
-
-bool EnableSequencerAction::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<Buffer> EnableSequencerAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__trigger_sequencer_action_data_t params{};
- params.cluster_index = m_cluster_index;
- params.sequencer_config.initial_l3_cut = m_initial_l3_cut;
- params.sequencer_config.initial_l3_offset = m_initial_l3_offset;
- params.sequencer_config.active_apu = m_active_apu;
- params.sequencer_config.active_ia = m_active_ia;
- params.sequencer_config.active_sc = m_active_sc;
- params.sequencer_config.active_l2 = m_active_l2;
- params.sequencer_config.l2_offset_0 = m_l2_offset_0;
- params.sequencer_config.l2_offset_1 = m_l2_offset_1;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitForSequencerAction::create(uint8_t cluster_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForSequencerAction(cluster_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitForSequencerAction::WaitForSequencerAction(uint8_t cluster_index) :
- ContextSwitchConfigAction(Type::WaitForSequencerDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_SEQUENCER_DONE_INTERRUPT),
- m_cluster_index(cluster_index)
-{}
-
-bool WaitForSequencerAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitForSequencerAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__sequencer_interrupt_data_t params{};
- params.sequencer_index = m_cluster_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> AllowInputDataflowAction::create(uint8_t stream_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) AllowInputDataflowAction(stream_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-
-AllowInputDataflowAction::AllowInputDataflowAction(uint8_t stream_index) :
- ContextSwitchConfigAction(Type::TriggerNewDataFromDataInput,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_DATA_FROM_VDMA_CHANNEL),
- m_stream_index(stream_index)
-{}
-
-bool AllowInputDataflowAction::supports_repeated_block() const
-{
- // DDR threads are implemented on HailoRT so no FW action is required. Hence they can't be part of a repeated block.
- if (Type::TriggerNewDataFromDataInputDdr == m_type) {
- return false;
- }
-
- return true;
-}
-
-Expected<Buffer> AllowInputDataflowAction::serialize_params(const ContextResources &context_resources) const
-{
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- CONTEXT_SWITCH_DEFS__fetch_data_action_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer.channel_id);
- params.stream_index = m_stream_index;
- params.network_index = edge_layer.layer_info.network_index;
- params.credit_type = CONTEXT_SWITCH_DEFS__CREDIT_IN_BYTES;
- params.host_buffer_type = CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t(edge_layer.buffer_info.buffer_type);
- params.periph_bytes_per_buffer = edge_layer.layer_info.nn_stream_config.periph_bytes_per_buffer;
- params.frame_periph_size = edge_layer.layer_info.nn_stream_config.periph_bytes_per_buffer *
- edge_layer.layer_info.nn_stream_config.periph_buffers_per_frame;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
- }
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- CONTEXT_SWITCH_DEFS__fetch_data_action_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer.channel_id);
- params.stream_index = m_stream_index;
- params.network_index = edge_layer.layer_info.network_index;
- params.credit_type = CONTEXT_SWITCH_DEFS__CREDIT_IN_DESCRIPTORS;
- params.host_buffer_type = CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t(edge_layer.buffer_info.buffer_type);
- params.periph_bytes_per_buffer = edge_layer.layer_info.nn_stream_config.periph_bytes_per_buffer;
- params.frame_periph_size = ((edge_layer.buffer_info.bytes_in_pattern - 1) / (edge_layer.buffer_info.desc_page_size)) + 1;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
- }
- }
-
- LOGGER__ERROR("Stream {} not found in edge layers", m_stream_index);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitForModuleConfigDoneAction::create(uint8_t module_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForModuleConfigDoneAction(module_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitForModuleConfigDoneAction::WaitForModuleConfigDoneAction(uint8_t module_index) :
- ContextSwitchConfigAction(Type::WaitForModuleConfigDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_MODULE_CONFIG_DONE_INTERRUPT),
- m_module_index(module_index)
-{}
-
-bool WaitForModuleConfigDoneAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitForModuleConfigDoneAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__module_config_done_interrupt_data_t params{};
- params.module_index = m_module_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> DdrPairInfoAction::create(const vdma::ChannelId &h2d_channel_id,
- const vdma::ChannelId &d2h_channel_id, uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DdrPairInfoAction(
- h2d_channel_id, d2h_channel_id, network_index, descriptors_per_frame, descs_count));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-DdrPairInfoAction::DdrPairInfoAction(const vdma::ChannelId &h2d_channel_id, const vdma::ChannelId &d2h_channel_id,
- uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count) :
- ContextSwitchConfigAction(Type::DdrPairInfo, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ADD_DDR_PAIR_INFO),
- m_h2d_channel_id(h2d_channel_id),
- m_d2h_channel_id(d2h_channel_id),
- m_network_index(network_index),
- m_descriptors_per_frame(descriptors_per_frame),
- m_descs_count(descs_count)
-{}
-
-bool DdrPairInfoAction::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<Buffer> DdrPairInfoAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__add_ddr_pair_info_action_data_t params{};
- params.h2d_packed_vdma_channel_id = pack_vdma_channel_id(m_h2d_channel_id);
- params.d2h_packed_vdma_channel_id = pack_vdma_channel_id(m_d2h_channel_id);
- params.network_index = m_network_index;
- params.descriptors_per_frame = m_descriptors_per_frame;
- params.programmed_descriptors_count = m_descs_count;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> StartDdrBufferingTaskAction::create()
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) StartDdrBufferingTaskAction());
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-StartDdrBufferingTaskAction::StartDdrBufferingTaskAction() :
- ContextSwitchConfigAction(Type::StartDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START)
-{}
-
-bool StartDdrBufferingTaskAction::supports_repeated_block() const
-{
- // There should only be one "start ddr buffering task action" per context,
- // so there's no need to support repeated blocks.
- return false;
-}
-
-Expected<Buffer> StartDdrBufferingTaskAction::serialize_params(const ContextResources &) const
-{
- return Buffer::create(0);
-}
-
-Expected<ContextSwitchConfigActionPtr> ResetDdrBufferingTaskAction::create()
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ResetDdrBufferingTaskAction());
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ResetDdrBufferingTaskAction::ResetDdrBufferingTaskAction() :
- ContextSwitchConfigAction(Type::ResetDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_RESET)
-{}
-
-bool ResetDdrBufferingTaskAction::supports_repeated_block() const
-{
- // There should only be one "reset ddr buffering task action" per context at most,
- // so there's no need to support repeated blocks.
- return false;
-}
-
-Expected<Buffer> ResetDdrBufferingTaskAction::serialize_params(const ContextResources &) const
-{
- return Buffer::create(0);
-}
-
-Expected<ContextSwitchConfigActionPtr> ChangeVdmaToStreamMapping::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, bool is_dummy_stream)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ChangeVdmaToStreamMapping(channel_id, stream_index,
- is_dummy_stream));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ChangeVdmaToStreamMapping::ChangeVdmaToStreamMapping(const vdma::ChannelId &channel_id, uint8_t stream_index,
- bool is_dummy_stream) :
- ContextSwitchConfigAction(Type::ChangeVdmaToStreamMapping,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_VDMA_TO_STREAM_MAPPING),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_is_dummy_stream(is_dummy_stream)
-{}
-
-bool ChangeVdmaToStreamMapping::supports_repeated_block() const
-{
- return true;
-}
-
-Expected<Buffer> ChangeVdmaToStreamMapping::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__change_vdma_to_stream_mapping_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.is_dummy_stream = m_is_dummy_stream;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitOutputTransferDoneAction::create(uint8_t stream_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitOutputTransferDoneAction(stream_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitOutputTransferDoneAction::WaitOutputTransferDoneAction(uint8_t stream_index) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitOutputTransferDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_OUTPUT_CHANNEL_TRANSFER_DONE_INTERRUPT),
- m_stream_index(stream_index)
-{}
-
-bool WaitOutputTransferDoneAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitOutputTransferDoneAction::serialize_params(const ContextResources &context_resources) const
-{
- const auto channel_id = get_layer_channel_id(context_resources);
- CHECK_EXPECTED(channel_id);
-
- CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(channel_id.value());
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<vdma::ChannelId> WaitOutputTransferDoneAction::get_layer_channel_id(const ContextResources &context_resources) const
-{
- // TODO: HRT-8611 use one loop
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- return vdma::ChannelId(edge_layer.channel_id);
- }
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- return vdma::ChannelId(edge_layer.channel_id);
- }
- }
-
- LOGGER__ERROR("Stream {} not found in edge layers", m_stream_index);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-Expected<ContextSwitchConfigActionPtr> OpenBoundaryInputChannelAction::create(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) OpenBoundaryInputChannelAction(channel_id,
- host_buffer_info));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-OpenBoundaryInputChannelAction::OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::OpenBoundaryInputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL),
- m_channel_id(channel_id),
- m_host_buffer_info(host_buffer_info)
-{}
-
-bool OpenBoundaryInputChannelAction::supports_repeated_block() const
-{
- // Open boundary actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> OpenBoundaryInputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.host_buffer_info = m_host_buffer_info;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> OpenBoundaryOutputChannelAction::create(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) OpenBoundaryOutputChannelAction(channel_id,
- host_buffer_info));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-OpenBoundaryOutputChannelAction::OpenBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::OpenBoundaryOutputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL),
- m_channel_id(channel_id),
- m_host_buffer_info(host_buffer_info)
-{}
-
-bool OpenBoundaryOutputChannelAction::supports_repeated_block() const
-{
- // Open boundary actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> OpenBoundaryOutputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.host_buffer_info = m_host_buffer_info;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-// TODO HRT-8705: remove nn_stream_config struct (that this function won't be needed)
-static CONTEXT_SWITCH_DEFS__stream_reg_info_t parse_nn_config(const CONTROL_PROTOCOL__nn_stream_config_t &nn_config)
-{
- CONTEXT_SWITCH_DEFS__stream_reg_info_t reg_info{};
- reg_info.core_bytes_per_buffer = nn_config.core_bytes_per_buffer;
- reg_info.core_buffers_per_frame = nn_config.core_buffers_per_frame;
- reg_info.feature_padding_payload = nn_config.feature_padding_payload;
- reg_info.buffer_padding_payload = nn_config.buffer_padding_payload;
- reg_info.buffer_padding = nn_config.buffer_padding;
- reg_info.periph_bytes_per_buffer = nn_config.periph_bytes_per_buffer;
- reg_info.periph_buffers_per_frame = nn_config.periph_buffers_per_frame;
- return reg_info;
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateBoundaryInputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateBoundaryInputChannelAction(channel_id,
- stream_index, nn_stream_config, host_buffer_info, initial_credit_size));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateBoundaryInputChannelAction::ActivateBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateBoundaryInputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_BOUNDARY_INPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info),
- m_initial_credit_size(initial_credit_size)
-{}
-
-bool ActivateBoundaryInputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateBoundaryInputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_boundary_input_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- params.initial_credit_size = m_initial_credit_size;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateBoundaryOutputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateBoundaryOutputChannelAction(channel_id,
- stream_index, nn_stream_config, host_buffer_info));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateBoundaryOutputChannelAction::ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateBoundaryOutputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_BOUNDARY_OUTPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info)
-{}
-
-bool ActivateBoundaryOutputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateBoundaryOutputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateInterContextInputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateInterContextInputChannelAction(channel_id,
- stream_index, nn_stream_config, host_buffer_info, initial_credit_size));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateInterContextInputChannelAction::ActivateInterContextInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateInterContextInputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_INTER_CONTEXT_INPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info),
- m_initial_credit_size(initial_credit_size)
-{}
-
-bool ActivateInterContextInputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateInterContextInputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_inter_context_input_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- params.initial_credit_size = m_initial_credit_size;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateInterContextOutputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateInterContextOutputChannelAction(channel_id,
- stream_index, network_index, nn_stream_config, host_buffer_info));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateInterContextOutputChannelAction::ActivateInterContextOutputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateInterContextOutputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_INTER_CONTEXT_OUTPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_network_index(network_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info)
-{}
-
-bool ActivateInterContextOutputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateInterContextOutputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_inter_context_output_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.network_index = m_network_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateDdrInputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
- const vdma::ChannelId &connected_d2h_channel_id)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateDdrInputChannelAction(channel_id,
- stream_index, nn_stream_config, host_buffer_info, initial_credit_size, connected_d2h_channel_id));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateDdrInputChannelAction::ActivateDdrInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
- const vdma::ChannelId &connected_d2h_channel_id) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateDdrInputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_INPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info),
- m_initial_credit_size(initial_credit_size),
- m_connected_d2h_channel_id(connected_d2h_channel_id)
-{}
-
-bool ActivateDdrInputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateDdrInputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_ddr_buffer_input_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- params.initial_credit_size = m_initial_credit_size;
- params.connected_d2h_packed_vdma_channel_id = pack_vdma_channel_id(m_connected_d2h_channel_id);
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ActivateDdrOutputChannelAction::create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateDdrOutputChannelAction(channel_id,
- stream_index, nn_stream_config, host_buffer_info, buffered_rows_count));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ActivateDdrOutputChannelAction::ActivateDdrOutputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateDdrOutputChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_OUTPUT),
- m_channel_id(channel_id),
- m_stream_index(stream_index),
- m_nn_stream_config(nn_stream_config),
- m_host_buffer_info(host_buffer_info),
- m_buffered_rows_count(buffered_rows_count)
-{}
-
-bool ActivateDdrOutputChannelAction::supports_repeated_block() const
-{
- // Activate actions shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ActivateDdrOutputChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.stream_index = m_stream_index;
- params.stream_reg_info = parse_nn_config(m_nn_stream_config);
- params.host_buffer_info = m_host_buffer_info;
- params.buffered_rows_count = m_buffered_rows_count;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> ValidateChannelAction::create(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ValidateChannelAction(channel_id, stream_direction,
- is_inter_context, host_buffer_type, initial_credit_size));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-ValidateChannelAction::ValidateChannelAction(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ValidateChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL),
- m_channel_id(channel_id),
- m_stream_direction(stream_direction),
- m_is_inter_context(is_inter_context),
- m_host_buffer_type(host_buffer_type),
- m_initial_credit_size(initial_credit_size)
-{}
-
-bool ValidateChannelAction::supports_repeated_block() const
-{
- // Validate action shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> ValidateChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__validate_vdma_channel_action_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
- static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
- static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
- params.is_inter_context = m_is_inter_context;
- params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
- params.initial_credit_size = m_initial_credit_size;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> DeactivateChannelAction::create(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DeactivateChannelAction(channel_id, stream_direction,
- is_inter_context, host_buffer_type, initial_credit_size));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-DeactivateChannelAction::DeactivateChannelAction(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::DeactivateChannel,
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_VDMA_CHANNEL),
- m_channel_id(channel_id),
- m_stream_direction(stream_direction),
- m_is_inter_context(is_inter_context),
- m_host_buffer_type(host_buffer_type),
- m_initial_credit_size(initial_credit_size)
-{}
-
-bool DeactivateChannelAction::supports_repeated_block() const
-{
- // Deactivate action shouldn't be repeated (for easier debugging).
- return false;
-}
-
-Expected<Buffer> DeactivateChannelAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
- params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
- static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
- static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
- params.is_inter_context = m_is_inter_context;
- params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
- params.initial_credit_size = m_initial_credit_size;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitDmaIdleAction::create(uint8_t stream_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitDmaIdleAction(stream_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitDmaIdleAction::WaitDmaIdleAction(uint8_t stream_index) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitDmaIdle, CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_DMA_IDLE_ACTION),
- m_stream_index(stream_index)
-{}
-
-bool WaitDmaIdleAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitDmaIdleAction::serialize_params(const ContextResources &context_resources) const
-{
- auto channel_and_type = get_layer_channel_id_and_type(context_resources);
- CHECK_EXPECTED(channel_and_type);
-
- const auto channel_id = channel_and_type->first;
- assert(LayerType::INTER_CONTEXT == channel_and_type->second || LayerType::BOUNDARY == channel_and_type->second);
- const bool is_inter_context = (LayerType::INTER_CONTEXT == channel_and_type->second);
-
- CONTEXT_SWITCH_DEFS__wait_dma_idle_data_t params{};
- params.packed_vdma_channel_id = pack_vdma_channel_id(channel_id);
- params.is_inter_context = static_cast<uint8_t>(is_inter_context);
- params.stream_index = m_stream_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<std::pair<vdma::ChannelId, LayerType>> WaitDmaIdleAction::get_layer_channel_id_and_type(
- const ContextResources &context_resources) const
-{
- // TODO: HRT-8611 use one loop
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- return std::make_pair(edge_layer.channel_id, LayerType::BOUNDARY);
- }
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers()) {
- if (m_stream_index == edge_layer.layer_info.stream_index) {
- return std::make_pair(edge_layer.channel_id, LayerType::INTER_CONTEXT);
- }
- }
-
- LOGGER__ERROR("Stream {} not found in edge layers (as boundary or inter context)", m_stream_index);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-Expected<ContextSwitchConfigActionPtr> WaitNmsIdleAction::create(uint8_t aggregator_index,
- uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index, uint8_t pred_cluster_ob_interface,
- uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitNmsIdleAction(aggregator_index,
- pred_cluster_ob_index, pred_cluster_ob_cluster_index, pred_cluster_ob_interface, succ_prepost_ob_index,
- succ_prepost_ob_interface));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-WaitNmsIdleAction::WaitNmsIdleAction(uint8_t aggregator_index, uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index,
- uint8_t pred_cluster_ob_interface, uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitNmsIdle, CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_NMS),
- m_aggregator_index(aggregator_index),
- m_pred_cluster_ob_index(pred_cluster_ob_index),
- m_pred_cluster_ob_cluster_index(pred_cluster_ob_cluster_index),
- m_pred_cluster_ob_interface(pred_cluster_ob_interface),
- m_succ_prepost_ob_index(succ_prepost_ob_index),
- m_succ_prepost_ob_interface(succ_prepost_ob_interface)
-{}
-
-bool WaitNmsIdleAction::supports_repeated_block() const
-{
- // Wait actions shouldn't be repeated (for easier debugging)
- return false;
-}
-
-Expected<Buffer> WaitNmsIdleAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__wait_nms_data_t params{};
- params.aggregator_index = m_aggregator_index;
- params.pred_cluster_ob_index = m_pred_cluster_ob_index;
- params.pred_cluster_ob_cluster_index = m_pred_cluster_ob_cluster_index;
- params.pred_cluster_ob_interface = m_pred_cluster_ob_interface;
- params.succ_prepost_ob_index = m_succ_prepost_ob_index;
- params.succ_prepost_ob_interface = m_succ_prepost_ob_interface;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-Expected<ContextSwitchConfigActionPtr> EnableNmsAction::create(uint8_t nms_unit_index, uint8_t network_index)
-{
- auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableNmsAction(nms_unit_index, network_index));
- CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
- return result;
-}
-
-EnableNmsAction::EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index) :
- ContextSwitchConfigAction(ContextSwitchConfigAction::Type::EnableNms, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS),
- m_nms_unit_index(nms_unit_index),
- m_network_index(network_index)
-{}
-
-Expected<Buffer> EnableNmsAction::serialize_params(const ContextResources &) const
-{
- CONTEXT_SWITCH_DEFS__enable_nms_action_t params{};
- params.nms_unit_index = m_nms_unit_index;
- params.network_index = m_network_index;
- return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
-}
-
-bool EnableNmsAction::supports_repeated_block() const
-{
- return true;
-}
-
-ContextSwitchOperation::ContextSwitchOperation(std::vector<ContextSwitchConfigActionPtr> &&actions) :
- m_actions(std::move(actions))
-{}
-
-const std::vector<ContextSwitchConfigActionPtr> &ContextSwitchOperation::actions() const
-{
- return m_actions;
-}
-
-std::vector<ContextSwitchConfigActionPtr> ContextSwitchOperation::get_actions_of_type(
- const std::set<ContextSwitchConfigAction::Type> &action_types) const
-{
- std::vector<ContextSwitchConfigActionPtr> filtered_actions;
- for (const auto &action : m_actions) {
- if (action_types.find(action->get_type()) != action_types.end()) {
- filtered_actions.push_back(action);
- }
- }
- return filtered_actions;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file context_switch_actions.hpp
- * @brief Contains classes represents the context switch action (Actions found in the HEFs
- * and action sent to the fw).
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_
-#define _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_
-
-#include "hailo/expected.hpp"
-#include "hailo/buffer.hpp"
-#include "vdma/channel_id.hpp"
-#include "layer_info.hpp"
-#include "control_protocol.hpp"
-#include "context_switch_defs.h"
-
-namespace hailort
-{
-
-
-class ContextResources;
-
-class ContextSwitchConfigAction;
-using ContextSwitchConfigActionPtr = std::shared_ptr<ContextSwitchConfigAction>;
-class ContextSwitchConfigAction
-{
-public:
- enum class Type
- {
- None,
- ActivateConfigChannel,
- DeactivateConfigChannel,
- WriteDataCcw,
- AddCcwBurst,
- FetchCfgChannelDescriptors,
- TriggerSequencer,
- WaitForSequencerDone,
- TriggerNewDataFromDataInput,
- TriggerNewDataFromDataInputDdr,
- EnableLcuNonDefault,
- EnableLcuDefault,
- DisableLcu,
- WaitForLcu,
- WaitForModuleConfigDone,
- DdrPairInfo,
- StartDdrBufferingTask,
- ResetDdrBufferingTask,
- AddRepeated,
- StartBurstCreditsTask,
- WaitForNetworkGroupChange,
- ChangeVdmaToStreamMapping,
- WaitOutputTransferDone,
- OpenBoundaryInputChannel,
- OpenBoundaryOutputChannel,
- ActivateBoundaryInputChannel,
- ActivateBoundaryOutputChannel,
- ActivateInterContextInputChannel,
- ActivateInterContextOutputChannel,
- ActivateDdrInputChannel,
- ActivateDdrOutputChannel,
- ValidateChannel,
- DeactivateChannel,
- WaitDmaIdle,
- WaitNmsIdle,
- EnableNms,
- };
-
- ContextSwitchConfigAction(ContextSwitchConfigAction &&) = default;
- ContextSwitchConfigAction(const ContextSwitchConfigAction &) = delete;
- ContextSwitchConfigAction &operator=(ContextSwitchConfigAction &&) = delete;
- ContextSwitchConfigAction &operator=(const ContextSwitchConfigAction &) = delete;
- virtual ~ContextSwitchConfigAction() = default;
-
- // Serialize the action a vector of buffers - each buffer is a chunk that must be sent continuously to the firmware
- // (For example each chunk can be sub action of RepeatedAction).
- virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources,
- bool is_repeated=false) const;
-
- virtual bool supports_repeated_block() const = 0;
- Type get_type() const;
- CONTEXT_SWITCH_DEFS__ACTION_TYPE_t get_action_list_type() const;
-
-protected:
- ContextSwitchConfigAction(Type type);
- ContextSwitchConfigAction(Type type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_list_type);
-
- Expected<Buffer> serialize_header(bool is_repeated) const;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const = 0;
-
- const Type m_type;
- const CONTEXT_SWITCH_DEFS__ACTION_TYPE_t m_action_list_type;
-};
-
-class NoneAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create();
- NoneAction(NoneAction &&) = default;
- NoneAction(const NoneAction &) = delete;
- NoneAction &operator=(NoneAction &&) = delete;
- NoneAction &operator=(const NoneAction &) = delete;
- virtual ~NoneAction() = default;
-
- virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources,
- bool is_repeated=false) const override;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- NoneAction();
-};
-
-class ActivateConfigChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateConfigChannelAction(uint8_t config_stream_index, const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- const uint8_t m_config_stream_index;
- const vdma::ChannelId m_channel_id;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
-};
-
-class DeactivateConfigChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, const vdma::ChannelId &channel_id);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- DeactivateConfigChannelAction(uint8_t config_stream_index, const vdma::ChannelId &channel_id);
-
- const uint8_t m_config_stream_index;
- const vdma::ChannelId m_channel_id;
-};
-
-class WriteDataCcwAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(Buffer &&data, uint8_t config_stream_index);
- WriteDataCcwAction(WriteDataCcwAction &&) = default;
- WriteDataCcwAction(const WriteDataCcwAction &) = delete;
- WriteDataCcwAction &operator=(WriteDataCcwAction &&) = delete;
- WriteDataCcwAction &operator=(const WriteDataCcwAction &) = delete;
- virtual ~WriteDataCcwAction() = default;
-
- virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources,
- bool is_repeated=false) const override;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
- uint8_t config_stream_index() const { return m_config_stream_index; }
- const MemoryView data() const { return MemoryView::create_const(m_data.data(), m_data.size()); }
-
-private:
- WriteDataCcwAction(Buffer &&data, uint8_t config_stream_index);
-
- Buffer m_data;
- const uint8_t m_config_stream_index;
-};
-
-class AddCcwBurstAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, uint16_t ccw_bursts);
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- AddCcwBurstAction(uint8_t config_stream_index, uint16_t ccw_bursts);
-
- const uint8_t m_config_stream_index;
- const uint16_t m_ccw_bursts;
-};
-
-class FetchCfgChannelDescriptorsAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, uint16_t desc_count);
-
- FetchCfgChannelDescriptorsAction(FetchCfgChannelDescriptorsAction &&) = default;
- FetchCfgChannelDescriptorsAction(const FetchCfgChannelDescriptorsAction &) = delete;
- FetchCfgChannelDescriptorsAction &operator=(FetchCfgChannelDescriptorsAction &&) = delete;
- FetchCfgChannelDescriptorsAction &operator=(const FetchCfgChannelDescriptorsAction &) = delete;
- virtual ~FetchCfgChannelDescriptorsAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- FetchCfgChannelDescriptorsAction(const vdma::ChannelId &channel_id, uint16_t desc_count);
-
- const vdma::ChannelId m_channel_id;
- const uint16_t m_desc_count;
-};
-
-class StartBurstCreditsTaskAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create();
-
- StartBurstCreditsTaskAction(StartBurstCreditsTaskAction &&) = default;
- StartBurstCreditsTaskAction(const StartBurstCreditsTaskAction &) = delete;
- StartBurstCreditsTaskAction &operator=(StartBurstCreditsTaskAction &&) = delete;
- StartBurstCreditsTaskAction &operator=(const StartBurstCreditsTaskAction &) = delete;
- virtual ~StartBurstCreditsTaskAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- StartBurstCreditsTaskAction();
-};
-
-class WaitForNetworkGroupChangeAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create();
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- WaitForNetworkGroupChangeAction();
-};
-
-class RepeatedAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(std::vector<ContextSwitchConfigActionPtr> &&actions);
- RepeatedAction(RepeatedAction &&) = default;
- RepeatedAction(const RepeatedAction &) = delete;
- RepeatedAction &operator=(RepeatedAction &&) = delete;
- RepeatedAction &operator=(const RepeatedAction &) = delete;
- virtual ~RepeatedAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
- virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources,
- bool is_repeated=false) const override;
-
-private:
- RepeatedAction(std::vector<ContextSwitchConfigActionPtr> &&actions);
-
- const std::vector<ContextSwitchConfigActionPtr> m_actions;
- const CONTEXT_SWITCH_DEFS__ACTION_TYPE_t m_sub_action_type;
-};
-
-class DisableLcuAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index);
- DisableLcuAction(DisableLcuAction &&) = default;
- DisableLcuAction(const DisableLcuAction &) = delete;
- DisableLcuAction &operator=(DisableLcuAction &&) = delete;
- DisableLcuAction &operator=(const DisableLcuAction &) = delete;
- virtual ~DisableLcuAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- DisableLcuAction(uint8_t cluster_index, uint8_t lcu_index);
-
- const uint8_t m_cluster_index;
- const uint8_t m_lcu_index;
-};
-
-
-class WaitForLcuAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index);
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- WaitForLcuAction(uint8_t cluster_index, uint8_t lcu_index);
-
- uint8_t m_cluster_index;
- uint8_t m_lcu_index;
-};
-
-class EnableLcuAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index,
- uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count);
- EnableLcuAction(EnableLcuAction &&) = default;
- EnableLcuAction(const EnableLcuAction &) = delete;
- EnableLcuAction &operator=(EnableLcuAction &&) = delete;
- EnableLcuAction &operator=(const EnableLcuAction &) = delete;
- virtual ~EnableLcuAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- static CONTEXT_SWITCH_DEFS__ACTION_TYPE_t get_enable_lcu_action_type(bool is_default);
- static Type get_enable_lcu_type(bool is_default);
-
- EnableLcuAction(uint8_t cluster_index, uint8_t lcu_index,
- uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count, bool is_default);
-
- const uint8_t m_cluster_index;
- const uint8_t m_lcu_index;
- const uint8_t m_network_index;
- const uint16_t m_kernel_done_address;
- const uint32_t m_kernel_done_count;
- const bool m_is_default;
-};
-
-class EnableSequencerAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t initial_l3_cut,
- uint16_t initial_l3_offset, uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2,
- uint64_t l2_offset_0, uint64_t l2_offset_1);
- EnableSequencerAction(EnableSequencerAction &&) = default;
- EnableSequencerAction(const EnableSequencerAction &) = delete;
- EnableSequencerAction &operator=(EnableSequencerAction &&) = delete;
- EnableSequencerAction &operator=(const EnableSequencerAction &) = delete;
- virtual ~EnableSequencerAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- EnableSequencerAction(uint8_t cluster_index, uint8_t initial_l3_cut, uint16_t initial_l3_offset,
- uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0,
- uint64_t l2_offset_1);
-
- const uint8_t m_cluster_index;
- const uint8_t m_initial_l3_cut;
- const uint16_t m_initial_l3_offset;
- const uint32_t m_active_apu;
- const uint32_t m_active_ia;
- const uint64_t m_active_sc;
- const uint64_t m_active_l2;
- const uint64_t m_l2_offset_0;
- const uint64_t m_l2_offset_1;
-};
-
-class WaitForSequencerAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index);
- WaitForSequencerAction(WaitForSequencerAction &&) = default;
- WaitForSequencerAction(const WaitForSequencerAction &) = delete;
- WaitForSequencerAction &operator=(WaitForSequencerAction &&) = delete;
- WaitForSequencerAction &operator=(const WaitForSequencerAction &) = delete;
- virtual ~WaitForSequencerAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- WaitForSequencerAction(uint8_t cluster_index);
-
- const uint8_t m_cluster_index;
-};
-
-class AllowInputDataflowAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
- AllowInputDataflowAction(AllowInputDataflowAction &&) = default;
- AllowInputDataflowAction(const AllowInputDataflowAction &) = delete;
- AllowInputDataflowAction &operator=(AllowInputDataflowAction &&) = delete;
- AllowInputDataflowAction &operator=(const AllowInputDataflowAction &) = delete;
- virtual ~AllowInputDataflowAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- explicit AllowInputDataflowAction(uint8_t stream_index);
-
- const uint8_t m_stream_index;
-};
-
-class WaitForModuleConfigDoneAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t module_index);
- WaitForModuleConfigDoneAction(WaitForModuleConfigDoneAction &&) = default;
- WaitForModuleConfigDoneAction(const WaitForModuleConfigDoneAction &) = delete;
- WaitForModuleConfigDoneAction &operator=(WaitForModuleConfigDoneAction &&) = delete;
- WaitForModuleConfigDoneAction &operator=(const WaitForModuleConfigDoneAction &) = delete;
- virtual ~WaitForModuleConfigDoneAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- WaitForModuleConfigDoneAction(uint8_t module_index);
-
- const uint8_t m_module_index;
-};
-
-class DdrPairInfoAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &h2d_channel_id,
- const vdma::ChannelId &d2h_channel_id, uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count);
- DdrPairInfoAction(DdrPairInfoAction &&) = default;
- DdrPairInfoAction(const DdrPairInfoAction &) = delete;
- DdrPairInfoAction &operator=(DdrPairInfoAction &&) = delete;
- DdrPairInfoAction &operator=(const DdrPairInfoAction &) = delete;
- virtual ~DdrPairInfoAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- DdrPairInfoAction(const vdma::ChannelId &h2d_channel_id, const vdma::ChannelId &d2h_channel_id,
- uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count);
-
- const vdma::ChannelId m_h2d_channel_id;
- const vdma::ChannelId m_d2h_channel_id;
- const uint8_t m_network_index;
- const uint32_t m_descriptors_per_frame;
- const uint16_t m_descs_count;
-};
-
-class StartDdrBufferingTaskAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create();
- StartDdrBufferingTaskAction(StartDdrBufferingTaskAction &&) = default;
- StartDdrBufferingTaskAction(const StartDdrBufferingTaskAction &) = delete;
- StartDdrBufferingTaskAction &operator=(StartDdrBufferingTaskAction &&) = delete;
- StartDdrBufferingTaskAction &operator=(const StartDdrBufferingTaskAction &) = delete;
- virtual ~StartDdrBufferingTaskAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- StartDdrBufferingTaskAction();
-};
-
-class ResetDdrBufferingTaskAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create();
- ResetDdrBufferingTaskAction(ResetDdrBufferingTaskAction &&) = default;
- ResetDdrBufferingTaskAction(const ResetDdrBufferingTaskAction &) = delete;
- ResetDdrBufferingTaskAction &operator=(ResetDdrBufferingTaskAction &&) = delete;
- ResetDdrBufferingTaskAction &operator=(const ResetDdrBufferingTaskAction &) = delete;
- virtual ~ResetDdrBufferingTaskAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-private:
- ResetDdrBufferingTaskAction();
-};
-
-class ChangeVdmaToStreamMapping : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, uint8_t stream_index,
- bool is_dummy_stream);
- ChangeVdmaToStreamMapping(ChangeVdmaToStreamMapping &&) = default;
- ChangeVdmaToStreamMapping(const ChangeVdmaToStreamMapping &) = delete;
- ChangeVdmaToStreamMapping &operator=(ChangeVdmaToStreamMapping &&) = delete;
- ChangeVdmaToStreamMapping &operator=(const ChangeVdmaToStreamMapping &) = delete;
- virtual ~ChangeVdmaToStreamMapping() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ChangeVdmaToStreamMapping(const vdma::ChannelId &channel_id, uint8_t stream_index, bool is_dummy_stream);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const bool m_is_dummy_stream;
-};
-
-class WaitOutputTransferDoneAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- explicit WaitOutputTransferDoneAction(uint8_t stream_index);
-
- Expected<vdma::ChannelId> get_layer_channel_id(const ContextResources &context_resources) const;
-
- uint8_t m_stream_index;
-};
-
-class OpenBoundaryInputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- const vdma::ChannelId m_channel_id;
- CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
-};
-
-class OpenBoundaryOutputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- OpenBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- const vdma::ChannelId m_channel_id;
- CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
-};
-
-class ActivateBoundaryInputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info,
- uint32_t initial_credit_size);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
- const uint32_t m_initial_credit_size;
-};
-
-class ActivateBoundaryOutputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
-};
-
-class ActivateInterContextInputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateInterContextInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info,
- uint32_t initial_credit_size);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
- const uint32_t m_initial_credit_size;
-};
-
-class ActivateInterContextOutputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, uint8_t stream_index,
- uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateInterContextOutputChannelAction(const vdma::ChannelId &channel_id, uint8_t stream_index,
- uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const uint8_t m_network_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
-};
-
-class ActivateDdrInputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
- const vdma::ChannelId &connected_d2h_channel_id);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateDdrInputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
- const vdma::ChannelId &connected_d2h_channel_id);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
- const uint32_t m_initial_credit_size;
- const vdma::ChannelId m_connected_d2h_channel_id;
-};
-
-class ActivateDdrOutputChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ActivateDdrOutputChannelAction(const vdma::ChannelId &channel_id,
- uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
- const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count);
-
- const vdma::ChannelId m_channel_id;
- const uint8_t m_stream_index;
- const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
- const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
- const uint32_t m_buffered_rows_count;
-};
-
-class ValidateChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- ValidateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
- bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
-
- const vdma::ChannelId m_channel_id;
- const hailo_stream_direction_t m_stream_direction;
- const bool m_is_inter_context;
- const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
- const uint32_t m_initial_credit_size;
-};
-
-class DeactivateChannelAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
- hailo_stream_direction_t stream_direction, bool is_inter_context,
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- DeactivateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
- bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
-
- const vdma::ChannelId m_channel_id;
- const hailo_stream_direction_t m_stream_direction;
- const bool m_is_inter_context;
- const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
- const uint32_t m_initial_credit_size;
-};
-
-class WaitDmaIdleAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- explicit WaitDmaIdleAction(uint8_t stream_index);
-
- Expected<std::pair<vdma::ChannelId, LayerType>> get_layer_channel_id_and_type(
- const ContextResources &context_resources) const;
-
- uint8_t m_stream_index;
-};
-
-class WaitNmsIdleAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t aggregator_index,
- uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index, uint8_t pred_cluster_ob_interface,
- uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface);
-
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- WaitNmsIdleAction(uint8_t aggregator_index, uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index,
- uint8_t pred_cluster_ob_interface, uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface);
-
- uint8_t m_aggregator_index;
- uint8_t m_pred_cluster_ob_index;
- uint8_t m_pred_cluster_ob_cluster_index;
- uint8_t m_pred_cluster_ob_interface;
- uint8_t m_succ_prepost_ob_index;
- uint8_t m_succ_prepost_ob_interface;
-};
-
-class EnableNmsAction : public ContextSwitchConfigAction
-{
-public:
- static Expected<ContextSwitchConfigActionPtr> create(uint8_t nms_unit_index, uint8_t network_index);
- EnableNmsAction(EnableNmsAction &&) = default;
- EnableNmsAction(const EnableNmsAction &) = delete;
- EnableNmsAction &operator=(EnableNmsAction &&) = delete;
- EnableNmsAction &operator=(const EnableNmsAction &) = delete;
- virtual ~EnableNmsAction() = default;
- virtual bool supports_repeated_block() const override;
- virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
-
-private:
- EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index);
-
- const uint8_t m_nms_unit_index;
- const uint8_t m_network_index;
-};
-
-class ContextSwitchOperation final {
-public:
- ContextSwitchOperation(std::vector<ContextSwitchConfigActionPtr> &&actions);
-
- const std::vector<ContextSwitchConfigActionPtr> &actions() const;
- std::vector<ContextSwitchConfigActionPtr> get_actions_of_type(const std::set<ContextSwitchConfigAction::Type> &action_types) const;
-
-private:
- std::vector<ContextSwitchConfigActionPtr> m_actions;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file context_switch_buffer_builder.cpp
- * @brief Class used to build the context switch buffer sent to the firmware
- **/
-
-#include "context_switch_buffer_builder.hpp"
-
-namespace hailort
-{
-
-ContextSwitchBufferBuilder::ContextSwitchBufferBuilder(CONTROL_PROTOCOL__context_switch_context_type_t context_type) :
- m_context_type(context_type)
-{
- // Initialize first control
- start_new_control();
-}
-
-void ContextSwitchBufferBuilder::write_action(MemoryView action)
-{
- assert(action.size() < std::numeric_limits<uint32_t>::max());
- const uint32_t action_size = static_cast<uint32_t>(action.size());
-
- if (!has_space_for_action(action_size)) {
- // Size exceeded single control size, creating a new control buffer.
- start_new_control();
- }
-
- auto &control = current_control();
- memcpy(&control.context_network_data[control.context_network_data_length], action.data(), action_size);
- control.context_network_data_length += action_size;
- control.actions_count++;
-}
-
-const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &ContextSwitchBufferBuilder::get_controls() const
-{
- return m_controls;
-}
-
-CONTROL_PROTOCOL__context_switch_context_info_single_control_t &ContextSwitchBufferBuilder::current_control()
-{
- assert(!m_controls.empty());
- return m_controls.back();
-}
-
-bool ContextSwitchBufferBuilder::has_space_for_action(uint32_t action_size)
-{
- auto &control = current_control();
- return (control.context_network_data_length + action_size) <= ARRAY_ENTRIES(control.context_network_data);
-}
-
-void ContextSwitchBufferBuilder::start_new_control()
-{
- if (!m_controls.empty()) {
- current_control().is_last_control_per_context = false;
- }
-
- // Creating a new control directly inside the vector to avoid copying the control struct.
- m_controls.emplace_back();
- auto &new_control = current_control();
- new_control.context_network_data_length = 0;
- new_control.actions_count = 0;
- new_control.context_type = static_cast<uint8_t>(m_context_type);
- new_control.is_first_control_per_context = (1 == m_controls.size());
- new_control.is_last_control_per_context = true;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file context_switch_buffer_builder.hpp
- * @brief Class used to build the context switch buffer sent to the firmware.
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_
-#define _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_
-
-#include "hailo/hailort.h"
-#include "control_protocol.hpp"
-#include "layer_info.hpp"
-#include "vdma/channel_id.hpp"
-
-namespace hailort
-{
-
-// This class manages a vector of CONTROL_PROTOCOL__context_switch_context_info_single_control_t controls to be sent
-// to the firmware. Actions are written to the control buffer, until we reach the maximum control size, then we will
-// start a new control.
-class ContextSwitchBufferBuilder final {
-public:
- ContextSwitchBufferBuilder(CONTROL_PROTOCOL__context_switch_context_type_t context_type);
-
- void write_action(MemoryView action);
- const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &get_controls() const;
-
-private:
- CONTROL_PROTOCOL__context_switch_context_info_single_control_t ¤t_control();
- bool has_space_for_action(uint32_t action_size);
- void start_new_control();
-
- CONTROL_PROTOCOL__context_switch_context_type_t m_context_type;
- std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> m_controls;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hcp_config_activated_network_group.cpp
- * @brief HcpConfigActivatedNetworkGroup implementation
- **/
-
-#include "context_switch/single_context/hcp_config_activated_network_group.hpp"
-#include "control.hpp"
-
-namespace hailort
-{
-
-Expected<HcpConfigActivatedNetworkGroup> HcpConfigActivatedNetworkGroup::create(Device &device, std::vector<WriteMemoryInfo> &config,
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- ActiveNetGroupHolder &active_net_group_holder,
- hailo_power_mode_t power_mode, EventPtr network_group_activated_event,
- ConfiguredNetworkGroupBase &network_group)
-{
- CHECK(!active_net_group_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
- "network group is currently active. You must deactivate before activating another network_group");
-
- // Close older dataflows
- auto status = Control::close_all_streams(device);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- // Reset nn_core before writing configurations
- status = device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- for (auto &m : config) {
- status = device.write_memory(m.address, MemoryView(m.data));
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- HcpConfigActivatedNetworkGroup object(device, active_net_group_holder, network_group_name, network_group_params, input_streams, output_streams,
- power_mode, std::move(network_group_activated_event), network_group, status);
- CHECK_SUCCESS_AS_EXPECTED(status);
- return object;
-}
-
-HcpConfigActivatedNetworkGroup::HcpConfigActivatedNetworkGroup(
- Device &device,
- ActiveNetGroupHolder &active_net_group_holder,
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- hailo_power_mode_t power_mode,
- EventPtr &&network_group_activated_event,
- ConfiguredNetworkGroupBase &network_group, hailo_status &status) :
- ActivatedNetworkGroupBase(network_group_params, input_streams, output_streams,
- std::move(network_group_activated_event), status),
- m_active_net_group_holder(active_net_group_holder),
- m_is_active(true),
- m_power_mode(power_mode),
- m_device(device),
- m_network_group_name(network_group_name)
-{
- // Validate ActivatedNetworkGroup status
- if (HAILO_SUCCESS != status) {
- return;
- }
- status = network_group.activate_impl(CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to activate network group");
- return;
- }
-}
-
-HcpConfigActivatedNetworkGroup::~HcpConfigActivatedNetworkGroup()
-{
- if (!m_is_active) {
- return;
- }
-
- auto expected_config_network_ref = m_active_net_group_holder.get();
- if (!expected_config_network_ref.has_value()) {
- LOGGER__ERROR("Error getting configured network group");
- return;
- }
- const auto &config_network_group = expected_config_network_ref.value();
-
- const auto status = config_network_group.get().deactivate_impl();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to deactivate network group");
- }
-}
-
-const std::string &HcpConfigActivatedNetworkGroup::get_network_group_name() const
-{
- return m_network_group_name;
-}
-
-} /* namespace hailort */
+++ /dev/null
-#include "context_switch/single_context/hcp_config_network_group.hpp"
-#include "network_group_internal.hpp"
-#include "control.hpp"
-
-#define OUTPUT_CHANNEL_INDEX_OFFSET (16)
-
-
-namespace hailort
-{
-
-HcpConfigNetworkGroup::HcpConfigNetworkGroup(Device &device, ActiveNetGroupHolder &active_net_group_holder,
- std::vector<WriteMemoryInfo> &&config, const ConfigureNetworkParams &config_params, NetworkGroupMetadata &&network_group_metadata,
- hailo_status &status, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops)
- : ConfiguredNetworkGroupBase(config_params, network_group_metadata, std::move(net_flow_ops), status),
- m_config(std::move(config)), m_active_net_group_holder(active_net_group_holder), m_device(device)
-{}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> HcpConfigNetworkGroup::create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t /* dynamic_batch_size */)
-{
- auto start_time = std::chrono::steady_clock::now();
-
- auto activated_net_group = HcpConfigActivatedNetworkGroup::create(m_device, m_config, name(), network_group_params,
- m_input_streams, m_output_streams, m_active_net_group_holder, m_config_params.power_mode,
- m_network_group_activated_event, (*this));
- CHECK_EXPECTED(activated_net_group);
-
- std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr = make_unique_nothrow<HcpConfigActivatedNetworkGroup>(activated_net_group.release());
- CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
- LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and thus the network is not fully activated yet.", name(), elapsed_time_ms);
-
- return activated_net_group_ptr;
-}
-
-Expected<hailo_stream_interface_t> HcpConfigNetworkGroup::get_default_streams_interface()
-{
- return m_device.get_default_streams_interface();
-}
-
-hailo_status HcpConfigNetworkGroup::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
-{
- (void) timeout;
- (void) network_name;
- return HAILO_INVALID_OPERATION;
-}
-
-hailo_status HcpConfigNetworkGroup::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
-{
- (void) threshold;
- (void) network_name;
- return HAILO_INVALID_OPERATION;
-}
-
-Expected<std::shared_ptr<LatencyMetersMap>> HcpConfigNetworkGroup::get_latency_meters()
-{
- /* hcp does not support latnecy. return empty map */
- LatencyMetersMap empty_map;
- return make_shared_nothrow<LatencyMetersMap>(empty_map);
-}
-
-Expected<std::shared_ptr<VdmaChannel>> HcpConfigNetworkGroup::get_boundary_vdma_channel_by_stream_name(
- const std::string &stream_name)
-{
- LOGGER__ERROR("get_boundary_vdma_channel_by_stream_name function for stream name {} is not supported in hcp config manager",
- stream_name);
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-hailo_status HcpConfigNetworkGroup::activate_impl(uint16_t dynamic_batch_size)
-{
- m_active_net_group_holder.set(*this);
-
- auto status = activate_low_level_streams(dynamic_batch_size);
- CHECK_SUCCESS(status, "Failed activating low level streams");
-
- status = m_network_group_activated_event->signal();
- CHECK_SUCCESS(status, "Failed to signal network activation event");
-
- return HAILO_SUCCESS;
-}
-hailo_status HcpConfigNetworkGroup::deactivate_impl()
-{
- auto expected_config_network_ref = m_active_net_group_holder.get();
- CHECK(expected_config_network_ref.has_value(), HAILO_INTERNAL_FAILURE, "Error getting configured network group");
-
- const auto &config_network_group = expected_config_network_ref.value();
- // Make sure the network group we are deactivating is this object
- CHECK(this == std::addressof(config_network_group.get()), HAILO_INTERNAL_FAILURE,
- "Trying to deactivate different network goup");
-
- m_active_net_group_holder.clear();
-
- if (!m_network_group_activated_event) {
- return HAILO_SUCCESS;
- }
-
- m_network_group_activated_event->reset();
-
- for (auto &name_pair : m_input_streams) {
- const auto status = name_pair.second->flush();
- CHECK_SUCCESS(status, "Failed to flush input stream {}", name_pair.first);
- }
-
- auto status = deactivate_low_level_streams();
- CHECK_SUCCESS(status, "Failed deactivating low level streams");
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file resource_manager.hpp
- * @brief Manager for vdma-config network group resources, for a specific physical device
- *
- * ResourceManager is used on 2 possible flows with the following dependencies:
- *
- * !-Working with physical device-!
- * VdmaDevice (either PcieDevice or CoreDevice)
- * |--vector of VdmaConfigNetworkGroup
- * |--ResourceManager <only one>
- * |--reference to physical device
- *
- * !-Working with virtual device-!
- * VDevice
- * |--vector of VdmaDevice (either PcieDevice or CoreDevice)
- * |--vector of VDeviceNetworkGroup
- * |-- vector of VdmaConfigNetworkGroup <one per phys device>
- * |--ResourceManager <only one>
- * |--reference to physical device
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_
-#define _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_
-
-#include "hailo/hailort.h"
-#include "inter_context_buffer.hpp"
-#include "ddr_channels_pair.hpp"
-#include "config_buffer.hpp"
-#include "vdma_channel.hpp"
-#include "control_protocol.hpp"
-#include "pcie_device.hpp"
-#include "channel_allocator.hpp"
-#include "context_switch/context_switch_buffer_builder.hpp"
-
-
-namespace hailort
-{
-
-#define DEFAULT_ACTUAL_BATCH_SIZE (1)
-
-
-struct BoundaryEdgeLayer {
- LayerInfo layer_info;
- vdma::ChannelId channel_id;
- CONTROL_PROTOCOL__host_buffer_info_t buffer_info;
-};
-
-struct InterContextEdgeLayer {
- LayerInfo layer_info;
- vdma::ChannelId channel_id;
- CONTROL_PROTOCOL__host_buffer_info_t buffer_info;
-};
-
-struct DdrChannelEdgeLayer {
- LayerInfo layer_info;
- vdma::ChannelId channel_id;
- CONTROL_PROTOCOL__host_buffer_info_t buffer_info;
-};
-
-class ContextResources final {
-public:
- static Expected<ContextResources> create(HailoRTDriver &driver, CONTROL_PROTOCOL__context_switch_context_type_t context_type,
- const std::vector<vdma::ChannelId> &config_channels_ids, const ConfigBufferInfoMap &config_buffer_infos);
-
- const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &get_controls() const;
- ContextSwitchBufferBuilder &builder();
-
- void add_edge_layer(const BoundaryEdgeLayer &edge_layer)
- {
- m_boundary_layers.emplace_back(std::move(edge_layer));
- }
-
- void add_edge_layer(const InterContextEdgeLayer &edge_layer)
- {
- m_inter_context_layers.emplace_back(std::move(edge_layer));
- }
-
- void add_edge_layer(const DdrChannelEdgeLayer &edge_layer)
- {
- m_ddr_channel_layers.emplace_back(std::move(edge_layer));
- }
-
- const std::vector<BoundaryEdgeLayer> &get_boundary_layers() const;
- const std::vector<InterContextEdgeLayer> &get_inter_context_layers() const;
- const std::vector<DdrChannelEdgeLayer> &get_ddr_channel_layers() const;
-
- ExpectedRef<DdrChannelsPair> create_ddr_channels_pair(const DdrChannelsInfo &ddr_info);
- ExpectedRef<const DdrChannelsPair> get_ddr_channels_pair(uint8_t d2h_stream_index) const;
- const std::vector<DdrChannelsPair> &get_ddr_channels_pairs() const;
-
- // Gets edge layer for a specific direction
- std::vector<BoundaryEdgeLayer> get_boundary_layers(hailo_stream_direction_t direction) const;
- std::vector<InterContextEdgeLayer> get_inter_context_layers(hailo_stream_direction_t direction) const;
- std::vector<DdrChannelEdgeLayer> get_ddr_channel_layers(hailo_stream_direction_t direction) const;
-
- hailo_status validate_edge_layers();
-
- std::vector<ConfigBuffer> &get_config_buffers();
-
-private:
- explicit ContextResources(HailoRTDriver &driver, CONTROL_PROTOCOL__context_switch_context_type_t context_type,
- std::vector<ConfigBuffer> &&config_buffers) :
- m_driver(std::ref(driver)),
- m_builder(context_type),
- m_config_buffers(std::move(config_buffers))
- {}
-
- std::reference_wrapper<HailoRTDriver> m_driver;
- ContextSwitchBufferBuilder m_builder;
- std::vector<ConfigBuffer> m_config_buffers;
- std::vector<DdrChannelsPair> m_ddr_channels_pairs;
-
- std::vector<BoundaryEdgeLayer> m_boundary_layers;
- std::vector<InterContextEdgeLayer> m_inter_context_layers;
- std::vector<DdrChannelEdgeLayer> m_ddr_channel_layers;
-};
-
-class ResourcesManager final
-{
-public:
- static Expected<ResourcesManager> create(VdmaDevice &vdma_device, HailoRTDriver &driver,
- const ConfigureNetworkParams &config_params, std::shared_ptr<NetworkGroupMetadata> network_group_metadata,
- uint8_t net_group_index);
-
- ~ResourcesManager() = default;
- ResourcesManager(const ResourcesManager &other) = delete;
- ResourcesManager &operator=(const ResourcesManager &other) = delete;
- ResourcesManager &operator=(ResourcesManager &&other) = delete;
- ResourcesManager(ResourcesManager &&other) noexcept;
-
- ExpectedRef<InterContextBuffer> create_inter_context_buffer(uint32_t transfer_size, uint8_t src_stream_index,
- uint8_t src_context_index, const std::string &network_name);
- ExpectedRef<InterContextBuffer> get_inter_context_buffer(const IntermediateBufferKey &key);
- hailo_status create_boundary_vdma_channel(const LayerInfo &layer_info);
-
- Expected<CONTROL_PROTOCOL__application_header_t> get_control_network_group_header();
-
- Expected<std::reference_wrapper<ContextResources>> add_new_context(CONTROL_PROTOCOL__context_switch_context_type_t type,
- const ConfigBufferInfoMap &config_info={});
-
- const SupportedFeatures &get_supported_features() const
- {
- return m_network_group_metadata->supported_features();
- }
-
- VdmaDevice &get_device()
- {
- return m_vdma_device;
- }
-
- Expected<vdma::ChannelId> get_available_channel_id(const LayerIdentifier &layer_identifier,
- VdmaChannel::Direction direction, uint8_t engine_index);
- hailo_status free_channel_index(const LayerIdentifier &layer_identifier);
-
- const char* get_dev_id() const
- {
- return m_vdma_device.get_dev_id();
- }
-
- LatencyMetersMap &get_latency_meters()
- {
- return m_latency_meters;
- }
-
- Expected<hailo_stream_interface_t> get_default_streams_interface();
-
- Expected<Buffer> read_intermediate_buffer(const IntermediateBufferKey &key);
-
- hailo_status create_internal_vdma_channels();
- hailo_status register_fw_managed_vdma_channels();
- hailo_status unregister_fw_managed_vdma_channels();
- hailo_status set_inter_context_channels_dynamic_batch_size(uint16_t dynamic_batch_size);
- hailo_status open_ddr_channels();
- void abort_ddr_channels();
- void close_ddr_channels();
- hailo_status configure();
- hailo_status enable_state_machine(uint16_t dynamic_batch_size);
- hailo_status reset_state_machine(bool keep_nn_config_during_reset = false);
- Expected<uint16_t> get_network_batch_size(const std::string &network_name) const;
- Expected<std::shared_ptr<VdmaChannel>> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name);
- Expected<std::shared_ptr<const VdmaChannel>> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) const;
- hailo_power_mode_t get_power_mode() const;
-
-private:
- hailo_status fill_infer_features(CONTROL_PROTOCOL__application_header_t &app_header);
- hailo_status fill_validation_features(CONTROL_PROTOCOL__application_header_t &app_header);
- hailo_status fill_network_batch_size(CONTROL_PROTOCOL__application_header_t &app_header);
-
- std::vector<ContextResources> m_contexts_resources;
- ChannelAllocator m_channel_allocator;
- VdmaDevice &m_vdma_device;
- HailoRTDriver &m_driver;
- const ConfigureNetworkParams m_config_params;
- std::map<IntermediateBufferKey, InterContextBuffer> m_inter_context_buffers;
- std::vector<VdmaChannel> m_internal_channels;
- std::shared_ptr<NetworkGroupMetadata> m_network_group_metadata;
- uint8_t m_net_group_index;
- uint8_t m_dynamic_context_count;
- uint8_t m_total_context_count;
- const std::vector<std::string> m_network_index_map;
- LatencyMetersMap m_latency_meters; // Latency meter per network
- std::map<std::string, std::shared_ptr<VdmaChannel>> m_boundary_channels; //map of string name and connected vDMA channel
- bool m_is_configured;
- // Config channels ids are shared between all context. The following vector contains the channel id for each
- // config_stream_index.
- std::vector<vdma::ChannelId> m_config_channels_ids;
-
- ResourcesManager(VdmaDevice &vdma_device, HailoRTDriver &driver,
- ChannelAllocator &&channel_allocator, const ConfigureNetworkParams config_params,
- std::shared_ptr<NetworkGroupMetadata> &&network_group_metadata, uint8_t net_group_index,
- const std::vector<std::string> &&network_index_map, LatencyMetersMap &&latency_meters,
- std::vector<vdma::ChannelId> &&config_channels_ids);
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_config_activated_network_group.hpp
- * @brief TODO: Represent activated network_group from HEF
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_
-#define _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_
-
-#include "hailo/expected.hpp"
-#include "vdma_channel.hpp"
-#include "pcie_stream.hpp"
-#include "context_switch/active_network_group_holder.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-
-#include <vector>
-#include <map>
-#include <functional>
-
-namespace hailort
-{
-
-class VdmaConfigActivatedNetworkGroup : public ActivatedNetworkGroupBase
-{
-public:
-
- static Expected<VdmaConfigActivatedNetworkGroup> create(
- ActiveNetGroupHolder &active_net_group_holder,
- const std::string &network_group_name,
- std::shared_ptr<ResourcesManager> resources_manager,
- const hailo_activate_network_group_params_t &network_group_params,
- uint16_t dynamic_batch_size,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- EventPtr network_group_activated_event,
- AccumulatorPtr deactivation_time_accumulator,
- ConfiguredNetworkGroupBase &network_group);
-
- virtual ~VdmaConfigActivatedNetworkGroup();
-
- VdmaConfigActivatedNetworkGroup(const VdmaConfigActivatedNetworkGroup &other) = delete;
- VdmaConfigActivatedNetworkGroup &operator=(const VdmaConfigActivatedNetworkGroup &other) = delete;
- VdmaConfigActivatedNetworkGroup &operator=(VdmaConfigActivatedNetworkGroup &&other) = delete;
- VdmaConfigActivatedNetworkGroup(VdmaConfigActivatedNetworkGroup &&other) noexcept;
-
- virtual const std::string &get_network_group_name() const override;
- virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
- virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
-
-private:
- VdmaConfigActivatedNetworkGroup(
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- uint16_t dynamic_batch_size,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- std::shared_ptr<ResourcesManager> &&resources_manager,
- ActiveNetGroupHolder &active_net_group_holder,
- EventPtr &&network_group_activated_event,
- AccumulatorPtr deactivation_time_accumulator,
- ConfiguredNetworkGroupBase &network_group, hailo_status &status);
-
- std::string m_network_group_name;
- bool m_should_reset_network_group;
- ActiveNetGroupHolder &m_active_net_group_holder;
- std::shared_ptr<ResourcesManager> m_resources_manager;
- AccumulatorPtr m_deactivation_time_accumulator;
- bool m_keep_nn_config_during_reset;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_config_manager.hpp
- * @brief Manager of HEF parsing and vdma-configured network groups resources for Pcie devices (both single and multi context)
- *
- **/
-
-#ifndef HAILO_VDMA_CONFIG_MANAGER_HPP_
-#define HAILO_VDMA_CONFIG_MANAGER_HPP_
-
-#include "context_switch/multi_context/vdma_config_network_group.hpp"
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-
-
-namespace hailort
-{
-
-class VdmaConfigManager final
-{
-public:
- VdmaConfigManager() = delete;
-
- static hailo_status switch_network_group(std::shared_ptr<VdmaConfigNetworkGroup> current_active_ng,
- std::shared_ptr<VdmaConfigNetworkGroup> next_ng, const uint16_t batch_size)
- {
- auto status = HAILO_UNINITIALIZED;
- // If current_active_ng is nullptr - we are activating first network group
- if (nullptr != current_active_ng) {
- status = current_active_ng->deactivate_impl();
- CHECK_SUCCESS(status, "Failed deactivating current network group");
-
- // TODO: MSW-762 - In mercury we need to reset after deactivate in case of mercury - this will be fixed and the
- // If will be removed when we make the nn_manager responsible to reset the nn-core
- // And if switching to nullptr (which is final deactivate - we must also reset state machine)
- if (Device::Type::CORE == current_active_ng->get_resources_manager()->get_device().get_type() ||
- (nullptr == next_ng)) {
- status = current_active_ng->get_resources_manager()->reset_state_machine(false);
- CHECK_SUCCESS(status, "Failed to reset state machine in switch network group");
- }
- }
-
- // If next_ng is nullptr we are deactivating last network group
- if (nullptr != next_ng) {
- status = next_ng->activate_impl(batch_size);
- CHECK_SUCCESS(status, "Failed activating network group");
- }
-
- return HAILO_SUCCESS;
- }
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDMA_CONFIG_MANAGER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_config_network_group.hpp
- * @brief Represent network_group from HEF file that can be activated
- *
- * This network_group can be used for both single or multi context network_groups but for PCIE only
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_NETWORK_GROUP_HPP_
-#define _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_NETWORK_GROUP_HPP_
-
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-#include "control_protocol.h"
-#include "hailort_defaults.hpp"
-#include "vdma_channel.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-#include "context_switch/multi_context/vdma_config_activated_network_group.hpp"
-#include "context_switch/active_network_group_holder.hpp"
-
-#include <cstdint>
-#include <assert.h>
-#include <map>
-#include <set>
-
-namespace hailort
-{
-
-
-class VdmaConfigNetworkGroup : public ConfiguredNetworkGroupBase
-{
-public:
- static Expected<VdmaConfigNetworkGroup> create(ActiveNetGroupHolder &active_net_group_holder,
- const ConfigureNetworkParams &config_params,
- std::shared_ptr<ResourcesManager> resources_managers, const std::string &hef_hash,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops);
-
- std::shared_ptr<ResourcesManager> &get_resources_manager()
- {
- return m_resources_manager;
- }
-
- // Functions to activate and deactivate network group for scheduler - dont create ActivatedNetworkGroup objects
- virtual hailo_status activate_impl(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_impl() override;
-
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size) override;
-
- virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
-
- virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
- virtual Expected<std::shared_ptr<VdmaChannel>> get_boundary_vdma_channel_by_stream_name(
- const std::string &stream_name) override;
-
- virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
- virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
-
- virtual ~VdmaConfigNetworkGroup() = default;
- VdmaConfigNetworkGroup(const VdmaConfigNetworkGroup &other) = delete;
- VdmaConfigNetworkGroup &operator=(const VdmaConfigNetworkGroup &other) = delete;
- VdmaConfigNetworkGroup &operator=(VdmaConfigNetworkGroup &&other) = delete;
- VdmaConfigNetworkGroup(VdmaConfigNetworkGroup &&other) noexcept : ConfiguredNetworkGroupBase(std::move(other)),
- m_active_net_group_holder(other.m_active_net_group_holder),
- m_resources_manager(std::move(other.m_resources_manager)),
- m_hef_hash(std::move(other.m_hef_hash))
- {}
-
- bool equals(const Hef &hef, const std::string &network_group_name) {
- return (network_group_name == name()) && (hef.hash() == m_hef_hash);
- }
-
-private:
- VdmaConfigNetworkGroup(ActiveNetGroupHolder &active_net_group_holder,
- const ConfigureNetworkParams &config_params,
- std::shared_ptr<ResourcesManager> &&resources_manager, const std::string &hef_hash,
- const NetworkGroupMetadata &network_group_metadata, hailo_status &status,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops);
-
- ActiveNetGroupHolder &m_active_net_group_holder;
- std::shared_ptr<ResourcesManager> m_resources_manager;
- std::string m_hef_hash;
-
- friend class VDeviceNetworkGroupWrapper;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_NETWORK_GROUP_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group.cpp
- * @brief: Configured Network Group and Activated Network Group
- **/
-
-#include "hailo/transform.hpp"
-#include "hailo/vstream.hpp"
-#include "network_group_internal.hpp"
-#include "hef_internal.hpp"
-#include "common/utils.hpp"
-#include "hailort_defaults.hpp"
-#include "eth_stream.hpp"
-#include "pcie_stream.hpp"
-#include "core_stream.hpp"
-#include "mipi_stream.hpp"
-#include "control.hpp"
-#include "common/runtime_statistics_internal.hpp"
-#include "vstream_internal.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-
-namespace hailort
-{
-
-ActivatedNetworkGroupBase::ActivatedNetworkGroupBase(const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- EventPtr &&network_group_activated_event, hailo_status &status) :
- m_network_group_params(network_group_params),
- m_network_group_activated_event(std::move(network_group_activated_event)),
- m_input_streams(input_streams),
- m_output_streams(output_streams)
-{
- status = validate_network_group_params(network_group_params);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to validate network_group params");
- return;
- }
-}
-
-uint32_t ActivatedNetworkGroupBase::get_invalid_frames_count()
-{
- uint32_t total_invalid_frames_count = 0;
- for (auto& name_stream_pair : m_output_streams) {
- total_invalid_frames_count += name_stream_pair.second->get_invalid_frames_count();
- }
- return total_invalid_frames_count;
-}
-
-// TODO: Implement function (HRT-3174)
-hailo_status ActivatedNetworkGroupBase::validate_network_group_params(
- const hailo_activate_network_group_params_t &/*network_group_params*/)
-{
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroup::activate()
-{
- const auto network_group_params = HailoRTDefaults::get_network_group_params();
- return activate(network_group_params);
-}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate(
- const hailo_activate_network_group_params_t &network_group_params)
-{
- return create_activated_network_group(network_group_params, CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
-}
-
-Expected<std::chrono::nanoseconds> get_latency(LatencyMeterPtr &latency_meter, bool clear)
-{
- auto hw_latency = latency_meter->get_latency(clear);
- if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
- CHECK_EXPECTED(hw_latency, "Failed getting latency");
- return hw_latency.release();
-}
-
-/* Network group base functions */
-Expected<LatencyMeasurementResult> ConfiguredNetworkGroupBase::get_latency_measurement(const std::string &network_name)
-{
- bool clear = ((m_config_params.latency & HAILO_LATENCY_CLEAR_AFTER_GET) == HAILO_LATENCY_CLEAR_AFTER_GET);
- LatencyMeasurementResult result = {};
-
- auto latency_meters_exp = get_latency_meters();
- CHECK_EXPECTED(latency_meters_exp);
- auto latency_meters = latency_meters_exp.release();
-
- if (network_name.empty()) {
- std::chrono::nanoseconds latency_sum(0);
- uint32_t measurements_count = 0;
- for (auto &latency_meter_pair : *latency_meters.get()) {
- auto hw_latency = get_latency(latency_meter_pair.second, clear);
- if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
- continue;
- }
- CHECK_EXPECTED(hw_latency);
- latency_sum += hw_latency.value();
- measurements_count++;
- }
- if (0 == measurements_count) {
- LOGGER__DEBUG("No latency measurements was found");
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
- result.avg_hw_latency = latency_sum / measurements_count;
- } else {
- if(!contains(*latency_meters, network_name)) {
- LOGGER__DEBUG("No latency measurements was found for network {}", network_name);
- return make_unexpected(HAILO_NOT_FOUND);
- }
- auto hw_latency = get_latency(latency_meters->at(network_name), clear);
- if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
- CHECK_EXPECTED(hw_latency);
- result.avg_hw_latency = hw_latency.value();
- }
- return result;
-}
-
-Expected<OutputStreamWithParamsVector> ConfiguredNetworkGroupBase::get_output_streams_from_vstream_names(
- const std::map<std::string, hailo_vstream_params_t> &outputs_params)
-{
- OutputStreamWithParamsVector results;
- std::unordered_map<std::string, hailo_vstream_params_t> outputs_edges_params;
- for (auto &name_params_pair : outputs_params) {
- auto stream_names = m_network_group_metadata.get_stream_names_from_vstream_name(name_params_pair.first);
- CHECK_EXPECTED(stream_names);
-
- for (auto &stream_name : stream_names.value()) {
- CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND);
- auto output_stream = m_output_streams.at(stream_name);
- if (output_stream->get_info().is_mux) {
- outputs_edges_params.emplace(name_params_pair);
- }
- else {
- NameToVStreamParamsMap name_to_params = {name_params_pair};
- results.emplace_back(output_stream, name_to_params);
- }
- }
- }
- // Add non mux streams to result
- hailo_status status = add_mux_streams_by_edges_names(results, outputs_edges_params);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return results;
-}
-
-// This function adds to results the OutputStreams that correspond to the edges in outputs_edges_params.
-// If an edge name appears in outputs_edges_params then all of its predecessors must appear in outputs_edges_params as well, Otherwise, an error is returned.
-// We use the set seen_edges in order to mark the edges already evaluated by one of its' predecessor.
-hailo_status ConfiguredNetworkGroupBase::add_mux_streams_by_edges_names(OutputStreamWithParamsVector &results,
- const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params)
-{
- std::unordered_set<std::string> seen_edges;
- for (auto &name_params_pair : outputs_edges_params) {
- if (seen_edges.end() != seen_edges.find(name_params_pair.first)) {
- // Edge has already been seen by one of its predecessors
- continue;
- }
- auto output_streams = get_output_streams_by_vstream_name(name_params_pair.first);
- CHECK_EXPECTED_AS_STATUS(output_streams);
- CHECK(output_streams->size() == 1, HAILO_INVALID_ARGUMENT,
- "mux streams cannot be separated into multiple streams");
- auto output_stream = output_streams.release()[0];
-
- // TODO: Find a better way to get the mux edges without creating OutputDemuxer
- auto expected_demuxer = OutputDemuxer::create(*output_stream);
- CHECK_EXPECTED_AS_STATUS(expected_demuxer);
-
- NameToVStreamParamsMap name_to_params;
- for (auto &edge : expected_demuxer.value()->get_edges_stream_info()) {
- auto edge_name_params_pair = outputs_edges_params.find(edge.name);
- CHECK(edge_name_params_pair != outputs_edges_params.end(), HAILO_INVALID_ARGUMENT,
- "All edges of stream {} must be in output vstream params. edge {} is missing.",
- name_params_pair.first, edge.name);
- seen_edges.insert(edge.name);
- name_to_params.insert(*edge_name_params_pair);
- }
- results.emplace_back(output_stream, name_to_params);
- }
- return HAILO_SUCCESS;
-}
-
-Expected<OutputStreamPtrVector> ConfiguredNetworkGroupBase::get_output_streams_by_vstream_name(const std::string &name)
-{
- auto stream_names = m_network_group_metadata.get_stream_names_from_vstream_name(name);
- CHECK_EXPECTED(stream_names);
-
- OutputStreamPtrVector output_streams;
- output_streams.reserve(stream_names->size());
- for (const auto &stream_name : stream_names.value()) {
- CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND);
- output_streams.emplace_back(m_output_streams.at(stream_name));
- }
-
- return output_streams;
-}
-
-Expected<LayerInfo> ConfiguredNetworkGroupBase::get_layer_info(const std::string &stream_name)
-{
- for (auto layer_info : m_network_group_metadata.get_all_layer_infos()) {
- if (layer_info.name == stream_name) {
- return layer_info;
- }
- }
- LOGGER__ERROR("Failed to find layer with name {}", stream_name);
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-ConfiguredNetworkGroupBase::ConfiguredNetworkGroupBase(
- const ConfigureNetworkParams &config_params, const NetworkGroupMetadata &network_group_metadata,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops,
- hailo_status &status) :
- m_config_params(config_params),
- m_min_configured_batch_size(get_smallest_configured_batch_size(config_params)),
- m_network_group_metadata(network_group_metadata),
- m_activation_time_accumulator(),
- m_deactivation_time_accumulator(),
- m_net_flow_ops(std::move(net_flow_ops))
-{
- auto event = Event::create_shared(Event::State::not_signalled);
- if (nullptr == event) {
- LOGGER__ERROR("Failed to create activation event");
- status = HAILO_INTERNAL_FAILURE;
- return;
- }
- m_network_group_activated_event = std::move(std::move(event));
-
- m_activation_time_accumulator = make_shared_nothrow<FullAccumulator<double>>("activation_time");
- if (nullptr == m_activation_time_accumulator) {
- LOGGER__ERROR("Failed to create activation time accumulator");
- status = HAILO_OUT_OF_HOST_MEMORY;
- return;
- };
-
- m_deactivation_time_accumulator = make_shared_nothrow<FullAccumulator<double>>("deactivation_time");
- if (nullptr == m_deactivation_time_accumulator) {
- LOGGER__ERROR("Failed to create deactivation time accumulator");
- status = HAILO_OUT_OF_HOST_MEMORY;
- return;
- };
-
- status = HAILO_SUCCESS;
-}
-
-uint16_t ConfiguredNetworkGroupBase::get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params)
-{
- // There are two possible situations:
- // 1) All networks in the network group have the same configured (and hence smallest) batch_size =>
- // We return that batch size.
- // 2) Not all of the networks have the same configured (and hence smallest) batch_size. Currently, when
- // using dynamic_batch_sizes, all networks will use the same dynamic_batch_size (until HRT-6535 is done).
- // Hence, we must not set a dynamic_batch_size to a value greater than the smallest configured network
- // batch_size (e.g. all the resources allocated are for at most the configured network batch_size).
-
- /* We iterate over all network's batch_sizes to get the non-default min.
- Ignoring HAILO_DEFAULT_BATCH_SIZE as it is not a real batch-value,
- but indicating the scheduler should optimize batches by himself */
- uint16_t min_batch_size = UINT16_MAX;
- for (const auto &network_params_pair : config_params.network_params_by_name) {
- if ((HAILO_DEFAULT_BATCH_SIZE != network_params_pair.second.batch_size) &&
- (network_params_pair.second.batch_size < min_batch_size)) {
- min_batch_size = network_params_pair.second.batch_size;
- }
- }
- return (UINT16_MAX == min_batch_size) ? DEFAULT_ACTUAL_BATCH_SIZE : min_batch_size;
-}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate_with_batch(uint16_t dynamic_batch_size)
-{
- return create_activated_network_group(HailoRTDefaults::get_network_group_params(), dynamic_batch_size);
-}
-
-const std::string &ConfiguredNetworkGroupBase::get_network_group_name() const
-{
- return m_network_group_metadata.network_group_name();
-}
-
-const std::string &ConfiguredNetworkGroupBase::name() const
-{
- return m_network_group_metadata.network_group_name();
-}
-
-hailo_status ConfiguredNetworkGroupBase::activate_low_level_streams(uint16_t dynamic_batch_size)
-{
- for (auto &name_pair : m_input_streams) {
- auto status = name_pair.second->activate_stream(dynamic_batch_size);
- CHECK_SUCCESS(status);
- }
- for (auto &name_pair : m_output_streams) {
- auto status = name_pair.second->activate_stream(dynamic_batch_size);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ConfiguredNetworkGroupBase::deactivate_low_level_streams()
-{
- // Best effort
- auto status = HAILO_SUCCESS;
- auto deactivate_status = HAILO_UNINITIALIZED;
- for (auto &name_pair : m_input_streams) {
- deactivate_status = name_pair.second->deactivate_stream();
- if (HAILO_SUCCESS != deactivate_status) {
- LOGGER__ERROR("Failed to deactivate input stream {}", name_pair.first);
- status = deactivate_status;
- }
- }
- for (auto &name_pair : m_output_streams) {
- deactivate_status = name_pair.second->deactivate_stream();
- if (HAILO_SUCCESS != deactivate_status) {
- LOGGER__ERROR("Failed to deactivate output stream {}", name_pair.first);
- status = deactivate_status;
- }
- }
-
- return status;
-}
-
-Expected<uint16_t> ConfiguredNetworkGroupBase::get_stream_batch_size(const std::string &stream_name)
-{
- for (const auto &layer_info : m_network_group_metadata.get_all_layer_infos()) {
- if (layer_info.name == stream_name) {
- for (auto const &network_params_pair : m_config_params.network_params_by_name) {
- if (network_params_pair.first == layer_info.network_name) {
- auto batch_size = network_params_pair.second.batch_size;
- return batch_size;
- }
- }
- }
- }
- LOGGER__ERROR("Failed to find network name output stream {}", stream_name);
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-bool ConfiguredNetworkGroupBase::is_multi_context() const
-{
- return m_network_group_metadata.supported_features().multi_context;
-}
-
-const ConfigureNetworkParams ConfiguredNetworkGroupBase::get_config_params() const
-{
- return m_config_params;
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_input_stream_from_config_params(Device &device,
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
-{
- auto edge_layer = get_layer_info(stream_name);
- CHECK_EXPECTED_AS_STATUS(edge_layer);
-
- CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
- "Device does not supports the given stream interface streams. Please update input_stream_params for stream {}.",
- stream_name);
-
- switch (stream_params.stream_interface) {
- case HAILO_STREAM_INTERFACE_PCIE:
- {
- auto batch_size_exp = get_stream_batch_size(stream_name);
- CHECK_EXPECTED_AS_STATUS(batch_size_exp);
- const auto stream_index = edge_layer->stream_index;
- auto vdma_channel_ptr = get_boundary_vdma_channel_by_stream_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr, "Failed to get vdma channel for output stream {}", stream_index);
-
- auto input_stream = PcieInputStream::create(device, vdma_channel_ptr.release(),
- edge_layer.value(), batch_size_exp.value(), m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(input_stream);
- m_input_streams.insert(make_pair(stream_name, input_stream.release()));
- }
- break;
- case HAILO_STREAM_INTERFACE_CORE:
- {
- auto batch_size_exp = get_stream_batch_size(stream_name);
- CHECK_EXPECTED_AS_STATUS(batch_size_exp);
- const auto stream_index = edge_layer->stream_index;
- auto vdma_channel_ptr = get_boundary_vdma_channel_by_stream_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr, "Failed to get vdma channel for output stream {}", stream_index);
-
- auto input_stream = CoreInputStream::create(device, vdma_channel_ptr.release(),
- edge_layer.value(), batch_size_exp.value(), m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(input_stream);
- m_input_streams.insert(make_pair(stream_name, input_stream.release()));
- }
- break;
- case HAILO_STREAM_INTERFACE_ETH:
- {
- auto input_stream = EthernetInputStream::create(device,
- edge_layer.value(), stream_params.eth_input_params, m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(input_stream);
- m_input_streams.insert(make_pair(stream_name, input_stream.release()));
- }
- break;
- case HAILO_STREAM_INTERFACE_MIPI:
- {
- auto input_stream = MipiInputStream::create(device,
- edge_layer.value(), stream_params.mipi_input_params, m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(input_stream);
- m_input_streams.insert(make_pair(stream_name, input_stream.release()));
- }
- break;
- default:
- {
- LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
- return HAILO_NOT_IMPLEMENTED;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_output_stream_from_config_params(Device &device,
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
-{
- auto edge_layer = get_layer_info(stream_name);
- CHECK_EXPECTED_AS_STATUS(edge_layer);
-
- CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
- "Device does not supports the given stream interface streams. Please update input_stream_params for stream {}.",
- stream_name);
-
- switch (stream_params.stream_interface) {
- case HAILO_STREAM_INTERFACE_PCIE:
- {
- auto batch_size_exp = get_stream_batch_size(stream_name);
- CHECK_EXPECTED_AS_STATUS(batch_size_exp);
- const auto stream_index = edge_layer->stream_index;
- auto vdma_channel_ptr = get_boundary_vdma_channel_by_stream_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr, "Failed to get vdma channel for output stream {}", stream_index);
-
- auto output_stream = PcieOutputStream::create(device, vdma_channel_ptr.release(),
- edge_layer.value(), batch_size_exp.value(), m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(output_stream);
- m_output_streams.insert(make_pair(stream_name, output_stream.release()));
- }
- break;
- case HAILO_STREAM_INTERFACE_CORE:
- {
- auto batch_size_exp = get_stream_batch_size(stream_name);
- CHECK_EXPECTED_AS_STATUS(batch_size_exp);
- const auto stream_index = edge_layer->stream_index;
- auto vdma_channel_ptr = get_boundary_vdma_channel_by_stream_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr, "Failed to get vdma channel for output stream {}", stream_index);
-
- auto output_stream = CoreOutputStream::create(device, vdma_channel_ptr.release(),
- edge_layer.value(), batch_size_exp.value(), m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(output_stream);
- m_output_streams.insert(make_pair(stream_name, output_stream.release()));
- }
- break;
- case HAILO_STREAM_INTERFACE_ETH:
- {
- auto output_stream = EthernetOutputStream::create(device,
- edge_layer.value(), stream_params.eth_output_params,
- m_network_group_activated_event);
- CHECK_EXPECTED_AS_STATUS(output_stream);
- m_output_streams.insert(make_pair(stream_name, output_stream.release()));
- }
- break;
- default:
- {
- LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
- return HAILO_NOT_IMPLEMENTED;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_streams_from_config_params(Device &device)
-{
- for (const auto &stream_parameters_pair : m_config_params.stream_params_by_name) {
- switch (stream_parameters_pair.second.direction) {
- case HAILO_H2D_STREAM:
- {
- auto status = create_input_stream_from_config_params(device,
- stream_parameters_pair.second,
- stream_parameters_pair.first);
- CHECK_SUCCESS(status);
- }
- break;
- case HAILO_D2H_STREAM:
- {
- auto status = create_output_stream_from_config_params(device,
- stream_parameters_pair.second,
- stream_parameters_pair.first);
- CHECK_SUCCESS(status);
- }
- break;
- default:
- LOGGER__ERROR("stream name {} direction is invalid.", stream_parameters_pair.first);
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<InputStreamRefVector> ConfiguredNetworkGroupBase::get_input_streams_by_network(const std::string &network_name)
-{
- auto input_stream_infos = m_network_group_metadata.get_input_stream_infos(network_name);
- CHECK_EXPECTED(input_stream_infos);
-
- InputStreamRefVector result;
- for (auto &stream_info : input_stream_infos.value()) {
- auto stream_ref = get_input_stream_by_name(stream_info.name);
- CHECK_EXPECTED(stream_ref);
- result.push_back(stream_ref.release());
- }
- return result;
-}
-
-Expected<OutputStreamRefVector> ConfiguredNetworkGroupBase::get_output_streams_by_network(const std::string &network_name)
-{
- auto output_stream_infos = m_network_group_metadata.get_output_stream_infos(network_name);
- CHECK_EXPECTED(output_stream_infos);
-
- OutputStreamRefVector result;
- for (auto &stream_info : output_stream_infos.value()) {
- auto stream_ref = get_output_stream_by_name(stream_info.name);
- CHECK_EXPECTED(stream_ref);
- result.push_back(stream_ref.release());
- }
- return result;
-}
-
-InputStreamRefVector ConfiguredNetworkGroupBase::get_input_streams()
-{
- InputStreamRefVector result;
- for (auto& name_stream_pair : m_input_streams) {
- result.emplace_back(std::ref(*name_stream_pair.second));
- }
- return result;
-}
-
-OutputStreamRefVector ConfiguredNetworkGroupBase::get_output_streams()
-{
- OutputStreamRefVector result;
- for (auto& name_stream_pair : m_output_streams) {
- result.emplace_back(std::ref(*name_stream_pair.second));
- }
- return result;
-}
-
-ExpectedRef<InputStream> ConfiguredNetworkGroupBase::get_input_stream_by_name(const std::string& name)
-{
- auto iterator = m_input_streams.find(name);
- if (m_input_streams.end() == iterator) {
- LOGGER__ERROR("Input stream name {} not found", name);
- return make_unexpected(HAILO_NOT_FOUND);
- }
-
- return std::ref<InputStream>(*iterator->second);
-}
-
-ExpectedRef<OutputStream> ConfiguredNetworkGroupBase::get_output_stream_by_name(const std::string& name)
-{
- auto iterator = m_output_streams.find(name);
- if (m_output_streams.end() == iterator) {
- LOGGER__ERROR("Output stream name {} not found", name);
- return make_unexpected(HAILO_NOT_FOUND);
- }
-
- return std::ref<OutputStream>(*iterator->second);
-}
-
-std::vector<std::reference_wrapper<InputStream>> ConfiguredNetworkGroupBase::get_input_streams_by_interface(
- hailo_stream_interface_t stream_interface)
-{
- std::vector<std::reference_wrapper<InputStream>> results;
- for (auto &name_pair : m_input_streams) {
- if (stream_interface == name_pair.second->get_interface()) {
- results.push_back(std::ref(*name_pair.second));
- }
- }
- return results;
-}
-
-std::vector<std::reference_wrapper<OutputStream>> ConfiguredNetworkGroupBase::get_output_streams_by_interface(
- hailo_stream_interface_t stream_interface)
-{
- std::vector<std::reference_wrapper<OutputStream>> results;
- for (auto &name_pair : m_output_streams) {
- if (stream_interface == name_pair.second->get_interface()) {
- results.push_back(std::ref(*name_pair.second));
- }
- }
- return results;
-}
-
-hailo_status ConfiguredNetworkGroupBase::wait_for_activation(const std::chrono::milliseconds &timeout)
-{
- return m_network_group_activated_event->wait(timeout);
-}
-
-Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupBase::get_output_vstream_groups()
-{
- std::vector<std::vector<std::string>> results;
-
- for (auto output_stream : get_output_streams()) {
- auto vstreams_group = get_vstream_names_from_stream_name(output_stream.get().name());
- CHECK_EXPECTED(vstreams_group);
- results.push_back(vstreams_group.release());
- }
-
- return results;
-}
-
-Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroupBase::make_output_vstream_params_groups(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- auto params = make_output_vstream_params(quantized, format_type, timeout_ms, queue_size);
- CHECK_EXPECTED(params);
-
- auto groups = get_output_vstream_groups();
- CHECK_EXPECTED(groups);
-
- std::vector<std::map<std::string, hailo_vstream_params_t>> results(groups->size(), std::map<std::string, hailo_vstream_params_t>());
-
- size_t pipeline_group_index = 0;
- for (const auto &group : groups.release()) {
- for (const auto &name_pair : params.value()) {
- if (contains(group, name_pair.first)) {
- results[pipeline_group_index].insert(name_pair);
- }
- }
- pipeline_group_index++;
- }
-
- return results;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupBase::make_input_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- auto input_vstream_infos = m_network_group_metadata.get_input_vstream_infos(network_name);
- CHECK_EXPECTED(input_vstream_infos);
-
- std::map<std::string, hailo_vstream_params_t> res;
- auto status = Hef::Impl::fill_missing_vstream_params_with_default(res, input_vstream_infos.value(), quantized,
- format_type, timeout_ms, queue_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
- return res;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupBase::make_output_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- auto output_vstream_infos = m_network_group_metadata.get_output_vstream_infos(network_name);
- CHECK_EXPECTED(output_vstream_infos);
- std::map<std::string, hailo_vstream_params_t> res;
- auto status = Hef::Impl::fill_missing_vstream_params_with_default(res, output_vstream_infos.value(), quantized,
- format_type, timeout_ms, queue_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
- return res;
-}
-
-Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroupBase::get_network_infos() const
-{
- return m_network_group_metadata.get_network_infos();
-}
-
-Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroupBase::get_all_stream_infos(
- const std::string &network_name) const
-{
- return m_network_group_metadata.get_all_stream_infos(network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_input_vstream_infos(
- const std::string &network_name) const
-{
- return m_network_group_metadata.get_input_vstream_infos(network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_output_vstream_infos(
- const std::string &network_name) const
-{
- return m_network_group_metadata.get_output_vstream_infos(network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_all_vstream_infos(
- const std::string &network_name) const
-{
- return m_network_group_metadata.get_all_vstream_infos(network_name);
-}
-
-AccumulatorPtr ConfiguredNetworkGroupBase::get_activation_time_accumulator() const
-{
- return m_activation_time_accumulator;
-}
-
-AccumulatorPtr ConfiguredNetworkGroupBase::get_deactivation_time_accumulator() const
-{
- return m_deactivation_time_accumulator;
-}
-
-static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_info_t &stream_info,
- const hailo_vstream_params_t &vstream_params)
-{
- auto local_vstream_params = vstream_params;
- local_vstream_params.user_buffer_format = HailoRTDefaults::expand_auto_format(vstream_params.user_buffer_format,
- stream_info.format);
- return local_vstream_params;
-}
-
-static std::map<std::string, hailo_vstream_info_t> vstream_infos_vector_to_map(std::vector<hailo_vstream_info_t> &&vstream_info_vector)
-{
- std::map<std::string, hailo_vstream_info_t> vstream_infos_map;
- for (const auto &vstream_info : vstream_info_vector) {
- vstream_infos_map.emplace(std::string(vstream_info.name), vstream_info);
- }
-
- return vstream_infos_map;
-}
-
-Expected<std::vector<InputVStream>> ConfiguredNetworkGroupBase::create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params)
-{
- auto input_vstream_infos = get_input_vstream_infos();
- CHECK_EXPECTED(input_vstream_infos);
- auto input_vstream_infos_map = vstream_infos_vector_to_map(input_vstream_infos.release());
-
- std::vector<InputVStream> vstreams;
- vstreams.reserve(inputs_params.size());
- for (const auto &name_params_pair : inputs_params) {
- CHECK_AS_EXPECTED(contains(m_input_streams, name_params_pair.first), HAILO_NOT_FOUND);
- auto input_stream = m_input_streams.at(name_params_pair.first);
-
- const auto vstream_info = input_vstream_infos_map.find(name_params_pair.first);
- CHECK_AS_EXPECTED(vstream_info != input_vstream_infos_map.end(), HAILO_NOT_FOUND,
- "Failed to find vstream info of {}", name_params_pair.first);
-
- const auto vstream_params = expand_vstream_params_autos(input_stream->get_info(), name_params_pair.second);
- auto inputs = VStreamsBuilderUtils::create_inputs(input_stream, vstream_info->second, vstream_params);
- CHECK_EXPECTED(inputs);
-
- vstreams.insert(vstreams.end(), std::make_move_iterator(inputs->begin()), std::make_move_iterator(inputs->end()));
- }
- return vstreams;
-}
-
-Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupBase::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
-{
- std::vector<OutputVStream> vstreams;
- vstreams.reserve(outputs_params.size());
- auto output_streams = get_output_streams_from_vstream_names(outputs_params);
- CHECK_EXPECTED(output_streams);
-
- auto output_vstream_infos = get_output_vstream_infos();
- CHECK_EXPECTED(output_vstream_infos);
- auto output_vstream_infos_map = vstream_infos_vector_to_map(output_vstream_infos.release());
-
- // We iterate through all output streams, and if they are nms, we collect them together by their original stream name.
- // We need this step because all nms output streams of the same original stream need to be fused together
-
- std::unordered_map<std::string, std::shared_ptr<NetFlowElement>> post_process_nms_ops;
- std::set<std::string> post_process_stream_inputs;
- for (auto &op : m_net_flow_ops) {
- CHECK_AS_EXPECTED(op->type == NetFlowElement::Type::YoloNmsOp, HAILO_INVALID_ARGUMENT,
- "Unexpected operation: {}", op->name);
- post_process_nms_ops.insert({op->name, op});
- post_process_stream_inputs.insert(op->input_streams.begin(), op->input_streams.end());
- }
- std::map<std::string, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>> nms_op_output_streams;
- std::map<std::string, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>> nms_output_streams;
- for (auto &stream_params_pair : output_streams.value()) {
- if ((HAILO_FORMAT_ORDER_HAILO_NMS == stream_params_pair.first->get_info().format.order && stream_params_pair.first->get_info().nms_info.is_defused) &&
- (outputs_params.end() != outputs_params.find(stream_params_pair.first->get_info().nms_info.defuse_info.original_name))) {
- auto original_name = stream_params_pair.first->get_info().nms_info.defuse_info.original_name;
- nms_output_streams.emplace(original_name, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>(
- OutputStreamPtrVector(), outputs_params.at(original_name)));
- nms_output_streams[original_name].first.push_back(stream_params_pair.first);
- } else if (post_process_stream_inputs.count(stream_params_pair.first->get_info().name)) {
- for (auto &op : m_net_flow_ops) {
- if (op->input_streams.count(stream_params_pair.first->get_info().name)) {
- CHECK_AS_EXPECTED(op->type == NetFlowElement::Type::YoloNmsOp,
- HAILO_INVALID_ARGUMENT, "Expected post-process YOLO-NMS operation");
- assert(op->output_pads.size() == 1);
- nms_op_output_streams.emplace(op->name, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>(
- OutputStreamPtrVector(), outputs_params.at(op->output_pads[0].name)));
- nms_op_output_streams[op->name].first.push_back(stream_params_pair.first);
- }
- }
- } else {
- auto outputs = VStreamsBuilderUtils::create_outputs(stream_params_pair.first, stream_params_pair.second, output_vstream_infos_map);
- CHECK_EXPECTED(outputs);
- vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
- }
- }
- for (auto &nms_output_stream_pair : nms_output_streams) {
- auto outputs = VStreamsBuilderUtils::create_output_nms(nms_output_stream_pair.second.first, nms_output_stream_pair.second.second,
- output_vstream_infos_map);
- CHECK_EXPECTED(outputs);
- vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
- }
- for (auto &nms_output_stream_pair : nms_op_output_streams) {
- auto op = post_process_nms_ops.at(nms_output_stream_pair.first);
- assert(op->type == NetFlowElement::Type::YoloNmsOp);
- auto nms_op = std::static_pointer_cast<NetFlowYoloNmsElement>(op);
- auto outputs = VStreamsBuilderUtils::create_output_post_process_nms(nms_output_stream_pair.second.first,
- nms_output_stream_pair.second.second, output_vstream_infos_map,
- *nms_op);
- CHECK_EXPECTED(outputs);
- vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
- }
- return vstreams;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_internal.hpp
- * @brief Class declaration for ConfiguredNetworkGroupBase and ActivatedNetworkGroupBase that implement the basic ConfiguredNetworkGroup
- * and ActivatedNetworkGroup interfaces. All internal classes that are relevant should inherit from the
- * ConfiguredNetworkGroupBase and ActivatedNetworkGroupBase classes.
- * Hence, the hierarchy is as follows:
- * --------------------------------------------------------------------------------------------------------------
- * | ConfiguredNetworkGroup | (External "interface")
- * | ________________________________|________________________________ |
- * | / \ |
- * | ConfiguredNetworkGroupBase ConfiguredNetworkGroupClient | (Base classes)
- * | / | \ |
- * | VdmaConfigNetworkGroup | HcpConfigNetworkGroup | (Actual implementations)
- * | VDeviceNetworkGroup |
- * | | |
- * | vector of VdmaConfigNetworkGroup |
- * -------------------------------------------------------------------------------------------------------------|
- * | ActivatedNetworkGroup | (External "interface")
- * | | |
- * | ActivatedNetworkGroupBase | (Base classes)
- * | __________________|_____________________________________________________ |
- * | / | \ |
- * | VdmaConfigActivatedNetworkGroup VDeviceActivatedNetworkGroup HcpConfigActivatedNetworkGroup | (Actual implementations)
- * | | |
- * | vector of VdmaConfigActivatedNetworkGroup |
- * --------------------------------------------------------------------------------------------------------------
- **/
-
-#ifndef _HAILO_NETWORK_GROUP_INTERNAL_HPP_
-#define _HAILO_NETWORK_GROUP_INTERNAL_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/network_group.hpp"
-#include "hef_internal.hpp"
-#include "common/latency_meter.hpp"
-#include "control_protocol.h"
-#include "vdma_channel.hpp"
-#include "context_switch/active_network_group_holder.hpp"
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#include "hailort_rpc_client.hpp"
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-namespace hailort
-{
-
-/** Represents a vector of InputStream ptrs */
-using InputStreamPtrVector = std::vector<std::shared_ptr<InputStream>>;
-
-/** Represents a vector of OutputStream ptrs */
-using OutputStreamPtrVector = std::vector<std::shared_ptr<OutputStream>>;
-
-class ActivatedNetworkGroupBase : public ActivatedNetworkGroup
-{
-public:
- virtual ~ActivatedNetworkGroupBase() = default;
- ActivatedNetworkGroupBase(const ActivatedNetworkGroupBase &other) = delete;
- ActivatedNetworkGroupBase &operator=(const ActivatedNetworkGroupBase &other) = delete;
- ActivatedNetworkGroupBase &operator=(ActivatedNetworkGroupBase &&other) = delete;
- ActivatedNetworkGroupBase(ActivatedNetworkGroupBase &&other) = default;
-
- virtual uint32_t get_invalid_frames_count() override;
-
-protected:
- hailo_activate_network_group_params_t m_network_group_params;
-
- ActivatedNetworkGroupBase(const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- EventPtr &&network_group_activated_event, hailo_status &status);
-
- EventPtr m_network_group_activated_event;
- std::map<std::string, std::shared_ptr<InputStream>> &m_input_streams;
- std::map<std::string, std::shared_ptr<OutputStream>> &m_output_streams;
-
-private:
- hailo_status validate_network_group_params(const hailo_activate_network_group_params_t &network_group_params);
-};
-
-class ConfiguredNetworkGroupBase : public ConfiguredNetworkGroup
-{
-public:
- virtual ~ConfiguredNetworkGroupBase() = default;
- ConfiguredNetworkGroupBase(const ConfiguredNetworkGroupBase &other) = delete;
- ConfiguredNetworkGroupBase &operator=(const ConfiguredNetworkGroupBase &other) = delete;
- ConfiguredNetworkGroupBase &operator=(ConfiguredNetworkGroupBase &&other) = delete;
- ConfiguredNetworkGroupBase(ConfiguredNetworkGroupBase &&other) = default;
-
- Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_with_batch(
- uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(const hailo_activate_network_group_params_t &network_group_params) override;
- virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override;
-
- virtual const std::string &get_network_group_name() const override;
- virtual const std::string &name() const override;
-
- virtual Expected<InputStreamRefVector> get_input_streams_by_network(const std::string &network_name="") override;
- virtual Expected<OutputStreamRefVector> get_output_streams_by_network(const std::string &network_name="") override;
- virtual InputStreamRefVector get_input_streams() override;
- virtual OutputStreamRefVector get_output_streams() override;
- virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface) override;
- virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface) override;
- virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string& name) override;
- virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string& name) override;
- virtual Expected<OutputStreamWithParamsVector> get_output_streams_from_vstream_names(
- const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
- virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="") override;
-
- virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name="") override;
- virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name="") override;
-
- virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size) override;
-
- virtual Expected<std::vector<std::vector<std::string>>> get_output_vstream_groups() override;
-
- virtual hailo_status activate_impl(uint16_t dynamic_batch_size) = 0;
- virtual hailo_status deactivate_impl() = 0;
-
- virtual Expected<std::vector<hailo_network_info_t>> get_network_infos() const override;
- virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const override;
- virtual AccumulatorPtr get_activation_time_accumulator() const override;
- virtual AccumulatorPtr get_deactivation_time_accumulator() const override;
- hailo_status create_streams_from_config_params(Device &device);
-
- virtual bool is_multi_context() const override;
- virtual const ConfigureNetworkParams get_config_params() const override;
-
- static Expected<LatencyMeterPtr> create_hw_latency_meter(Device &device,
- const std::vector<LayerInfo> &layers);
-
- Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name)
- {
- return m_network_group_metadata.get_vstream_names_from_stream_name(stream_name);
- }
-
- const SupportedFeatures &get_supported_features()
- {
- return m_network_group_metadata.supported_features();
- }
-
- Expected<uint16_t> get_stream_batch_size(const std::string &stream_name);
-
- virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params);
- virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params);
-
- std::map<std::string, std::shared_ptr<InputStream>> m_input_streams;
- std::map<std::string, std::shared_ptr<OutputStream>> m_output_streams;
-
-protected:
- ConfiguredNetworkGroupBase(const ConfigureNetworkParams &config_params,
- const NetworkGroupMetadata &network_group_metadata, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops, hailo_status &status);
-
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size) = 0;
-
- hailo_status create_output_stream_from_config_params(Device &device,
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
- hailo_status create_input_stream_from_config_params(Device &device,
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
- hailo_status add_mux_streams_by_edges_names(OutputStreamWithParamsVector &result,
- const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params);
- Expected<OutputStreamPtrVector> get_output_streams_by_vstream_name(const std::string &name);
-
- hailo_status activate_low_level_streams(uint16_t dynamic_batch_size);
- hailo_status deactivate_low_level_streams();
-
- Expected<LayerInfo> get_layer_info(const std::string &stream_name);
-
- virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() = 0;
- virtual Expected<std::shared_ptr<VdmaChannel>> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) = 0;
-
- const ConfigureNetworkParams m_config_params;
- const uint16_t m_min_configured_batch_size; // TODO: remove after HRT-6535
- EventPtr m_network_group_activated_event;
- const NetworkGroupMetadata m_network_group_metadata;
- AccumulatorPtr m_activation_time_accumulator;
- AccumulatorPtr m_deactivation_time_accumulator;
-
-private:
- friend class VDeviceNetworkGroup;
-
- static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
-
- std::vector<std::shared_ptr<NetFlowElement>> m_net_flow_ops;
-};
-
-using ActiveNetGroupHolder = ActiveNetworkGroupHolder<ConfiguredNetworkGroupBase>;
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-class ConfiguredNetworkGroupClient : public ConfiguredNetworkGroup
-{
-public:
- ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle);
-
- virtual ~ConfiguredNetworkGroupClient();
- ConfiguredNetworkGroupClient(const ConfiguredNetworkGroupClient &other) = delete;
- ConfiguredNetworkGroupClient &operator=(const ConfiguredNetworkGroupClient &other) = delete;
- ConfiguredNetworkGroupClient &operator=(ConfiguredNetworkGroupClient &&other) = delete;
- ConfiguredNetworkGroupClient(ConfiguredNetworkGroupClient &&other) = default;
-
- virtual const std::string &get_network_group_name() const override;
- virtual const std::string &name() const override;
- virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
- virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface) override;
- virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface) override;
- virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string &name) override;
- virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string &name) override;
- virtual Expected<InputStreamRefVector> get_input_streams_by_network(const std::string &network_name="") override;
- virtual Expected<OutputStreamRefVector> get_output_streams_by_network(const std::string &network_name="") override;
- virtual InputStreamRefVector get_input_streams() override;
- virtual OutputStreamRefVector get_output_streams() override;
- virtual Expected<OutputStreamWithParamsVector> get_output_streams_from_vstream_names(
- const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
-
- virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="") override;
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(const hailo_activate_network_group_params_t &network_group_params) override;
- virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override;
-
- virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name="") override;
- virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name="") override;
- virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size) override;
- virtual Expected<std::vector<std::vector<std::string>>> get_output_vstream_groups() override;
-
- virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_network_info_t>> get_network_infos() const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name="") const override;
- virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const override;
-
- virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
- virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
-
- virtual AccumulatorPtr get_activation_time_accumulator() const override;
- virtual AccumulatorPtr get_deactivation_time_accumulator() const override;
-
- virtual bool is_multi_context() const override;
- virtual const ConfigureNetworkParams get_config_params() const override;
-
- virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params);
- virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params);
-
-private:
- std::unique_ptr<HailoRtRpcClient> m_client;
- uint32_t m_handle;
- std::string m_network_group_name;
-};
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-} /* namespace hailort */
-
-#endif /* _HAILO_NETWORK_GROUP_INTERNAL_HPP_ */
+++ /dev/null
-#include "multi_context/resource_manager.hpp"
-#include "control.hpp"
-#include "hailort_defaults.hpp"
-#include <numeric>
-
-namespace hailort
-{
-
-
-Expected<ContextResources> ContextResources::create(HailoRTDriver &driver,
- CONTROL_PROTOCOL__context_switch_context_type_t context_type, const std::vector<vdma::ChannelId> &config_channels_ids,
- const ConfigBufferInfoMap &config_buffer_infos)
-{
- CHECK_AS_EXPECTED(context_type < CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_COUNT, HAILO_INVALID_ARGUMENT);
-
- CHECK_AS_EXPECTED(config_buffer_infos.size() <= config_channels_ids.size(), HAILO_INTERNAL_FAILURE,
- "config_buffer_infos size ({}) is bigger than config_channels_id count ({})",
- config_buffer_infos.size(), config_channels_ids.size());
-
- std::vector<ConfigBuffer> config_buffers;
- config_buffers.reserve(config_buffer_infos.size());
- for (uint8_t config_stream_index = 0; config_stream_index < config_buffer_infos.size(); config_stream_index++) {
- auto buffer_resource = ConfigBuffer::create(driver, config_channels_ids[config_stream_index],
- config_buffer_infos.at(config_stream_index));
- CHECK_EXPECTED(buffer_resource);
- config_buffers.emplace_back(buffer_resource.release());
- }
-
- return ContextResources(driver, context_type, std::move(config_buffers));
-}
-
-const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &ContextResources::get_controls() const
-{
- return m_builder.get_controls();
-}
-
-ContextSwitchBufferBuilder &ContextResources::builder()
-{
- return m_builder;
-}
-
-const std::vector<BoundaryEdgeLayer> &ContextResources::get_boundary_layers() const
-{
- return m_boundary_layers;
-}
-
-const std::vector<InterContextEdgeLayer> &ContextResources::get_inter_context_layers() const
-{
- return m_inter_context_layers;
-}
-
-const std::vector<DdrChannelEdgeLayer> &ContextResources::get_ddr_channel_layers() const
-{
- return m_ddr_channel_layers;
-}
-
-ExpectedRef<DdrChannelsPair> ContextResources::create_ddr_channels_pair(const DdrChannelsInfo &ddr_info)
-{
- auto buffer = DdrChannelsPair::create(m_driver, ddr_info);
- CHECK_EXPECTED(buffer);
-
- m_ddr_channels_pairs.emplace_back(buffer.release());
- return std::ref(m_ddr_channels_pairs.back());
-}
-
-ExpectedRef<const DdrChannelsPair> ContextResources::get_ddr_channels_pair(uint8_t d2h_stream_index) const
-{
- for (auto &ddr_channels_pair : m_ddr_channels_pairs) {
- if (ddr_channels_pair.info().d2h_stream_index == d2h_stream_index) {
- return std::ref(ddr_channels_pair);
- }
- }
-
- LOGGER__ERROR("Couldn't find ddr channels pair for {}", d2h_stream_index);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-const std::vector<DdrChannelsPair> &ContextResources::get_ddr_channels_pairs() const
-{
- return m_ddr_channels_pairs;
-}
-
-std::vector<BoundaryEdgeLayer> ContextResources::get_boundary_layers(hailo_stream_direction_t direction) const
-{
- std::vector<BoundaryEdgeLayer> edge_layers;
- for (const auto &edge_layer : m_boundary_layers) {
- if (edge_layer.layer_info.direction == direction) {
- edge_layers.push_back(edge_layer);
- }
- }
- return edge_layers;
-}
-
-std::vector<InterContextEdgeLayer> ContextResources::get_inter_context_layers(hailo_stream_direction_t direction) const
-{
- std::vector<InterContextEdgeLayer> edge_layers;
- for (const auto &edge_layer : m_inter_context_layers) {
- if (edge_layer.layer_info.direction == direction) {
- edge_layers.push_back(edge_layer);
- }
- }
- return edge_layers;
-}
-
-std::vector<DdrChannelEdgeLayer> ContextResources::get_ddr_channel_layers(hailo_stream_direction_t direction) const
-{
- std::vector<DdrChannelEdgeLayer> edge_layers;
- for (const auto &edge_layer : m_ddr_channel_layers) {
- if (edge_layer.layer_info.direction == direction) {
- edge_layers.push_back(edge_layer);
- }
- }
- return edge_layers;
-}
-
-hailo_status ContextResources::validate_edge_layers()
-{
- std::set<vdma::ChannelId> used_channel_ids;
- for (const auto &edge_layer : get_boundary_layers()) {
- CHECK(used_channel_ids.find(edge_layer.channel_id) == used_channel_ids.end(), HAILO_INTERNAL_FAILURE,
- "Same stream use the same channel id {}", edge_layer.channel_id);
- used_channel_ids.insert(edge_layer.channel_id);
- }
-
- for (const auto &edge_layer : get_inter_context_layers()) {
- CHECK(used_channel_ids.find(edge_layer.channel_id) == used_channel_ids.end(), HAILO_INTERNAL_FAILURE,
- "Same stream use the same channel id {}", edge_layer.channel_id);
- used_channel_ids.insert(edge_layer.channel_id);
- }
-
- for (const auto &edge_layer : get_ddr_channel_layers()) {
- CHECK(used_channel_ids.find(edge_layer.channel_id) == used_channel_ids.end(), HAILO_INTERNAL_FAILURE,
- "Same stream use the same channel id {}", edge_layer.channel_id);
- used_channel_ids.insert(edge_layer.channel_id);
- }
-
- return HAILO_SUCCESS;
-}
-
-std::vector<ConfigBuffer> &ContextResources::get_config_buffers()
-{
- return m_config_buffers;
-}
-
-static Expected<LatencyMeterPtr> create_hw_latency_meter(const std::vector<LayerInfo> &layers)
-{
- std::set<std::string> d2h_channel_names;
-
- size_t h2d_streams_count = 0;
- for (const auto &layer : layers) {
- if (layer.direction == HAILO_D2H_STREAM) {
- if (HAILO_FORMAT_ORDER_HAILO_NMS == layer.format.order) {
- LOGGER__WARNING("HW Latency measurement is not supported on NMS networks");
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- d2h_channel_names.insert(layer.name);
- }
- else {
- h2d_streams_count++;
- }
- }
-
- if (h2d_streams_count > 1) {
- LOGGER__WARNING("HW Latency measurement is supported on networks with a single input");
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- return make_shared_nothrow<LatencyMeter>(d2h_channel_names, MAX_IRQ_TIMESTAMPS_SIZE);
-}
-
-static Expected<LatencyMetersMap> create_latency_meters_from_config_params(
- const ConfigureNetworkParams &config_params, std::shared_ptr<NetworkGroupMetadata> network_group_metadata)
-{
- LatencyMetersMap latency_meters_map;
-
- if ((config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) {
- // Best affort for starting latency meter.
- auto networks_names = network_group_metadata->get_network_names();
- for (auto &network_name : networks_names) {
- auto layer_infos = network_group_metadata->get_all_layer_infos(network_name);
- CHECK_EXPECTED(layer_infos);
- auto latency_meter = create_hw_latency_meter(layer_infos.value());
- if (latency_meter) {
- latency_meters_map.emplace(network_name, latency_meter.release());
- LOGGER__DEBUG("Starting hw latency measurement for network {}", network_name);
- }
- }
- }
-
- return latency_meters_map;
-}
-
-Expected<ResourcesManager> ResourcesManager::create(VdmaDevice &vdma_device, HailoRTDriver &driver,
- const ConfigureNetworkParams &config_params, std::shared_ptr<NetworkGroupMetadata> network_group_metadata,
- uint8_t net_group_index)
-{
- // Allocate config channels. In order to use the same channel ids for config channels in all contexts,
- // we allocate all of them here, and use in preliminary/dynamic context.
- ChannelAllocator allocator(driver.dma_engines_count());
- std::vector<vdma::ChannelId> config_channels_ids;
- const auto &config_channels_info = network_group_metadata->config_channels_info();
- config_channels_ids.reserve(config_channels_info.size());
- for (uint8_t cfg_index = 0; cfg_index < config_channels_info.size(); cfg_index++) {
- const auto layer_identifier = std::make_tuple(LayerType::CFG, "", cfg_index);
- const auto engine_index = config_channels_info[cfg_index].engine_index;
- auto channel_id = allocator.get_available_channel_id(layer_identifier, VdmaChannel::Direction::H2D, engine_index);
- CHECK_EXPECTED(channel_id);
- config_channels_ids.push_back(channel_id.release());
- }
-
- auto network_index_map = network_group_metadata->get_network_names();
-
- auto latency_meters = create_latency_meters_from_config_params(config_params, network_group_metadata);
- CHECK_EXPECTED(latency_meters);
- ResourcesManager resources_manager(vdma_device, driver, std::move(allocator), config_params,
- std::move(network_group_metadata), net_group_index,
- std::move(network_index_map), latency_meters.release(), std::move(config_channels_ids));
-
- return resources_manager;
-}
-
-ResourcesManager::ResourcesManager(VdmaDevice &vdma_device, HailoRTDriver &driver,
- ChannelAllocator &&channel_allocator, const ConfigureNetworkParams config_params,
- std::shared_ptr<NetworkGroupMetadata> &&network_group_metadata,
- uint8_t net_group_index, const std::vector<std::string> &&network_index_map,
- LatencyMetersMap &&latency_meters,
- std::vector<vdma::ChannelId> &&config_channels_ids) :
- m_contexts_resources(),
- m_channel_allocator(std::move(channel_allocator)),
- m_vdma_device(vdma_device),
- m_driver(driver),
- m_config_params(config_params),
- m_inter_context_buffers(),
- m_internal_channels(),
- m_network_group_metadata(std::move(network_group_metadata)),
- m_net_group_index(net_group_index),
- m_dynamic_context_count(0),
- m_total_context_count(0),
- m_network_index_map(std::move(network_index_map)),
- m_latency_meters(std::move(latency_meters)),
- m_boundary_channels(),
- m_is_configured(false),
- m_config_channels_ids(std::move(config_channels_ids))
-{}
-
-ResourcesManager::ResourcesManager(ResourcesManager &&other) noexcept :
- m_contexts_resources(std::move(other.m_contexts_resources)),
- m_channel_allocator(std::move(other.m_channel_allocator)),
- m_vdma_device(other.m_vdma_device),
- m_driver(other.m_driver),
- m_config_params(other.m_config_params),
- m_inter_context_buffers(std::move(other.m_inter_context_buffers)),
- m_internal_channels(std::move(other.m_internal_channels)),
- m_network_group_metadata(std::move(other.m_network_group_metadata)),
- m_net_group_index(other.m_net_group_index),
- m_dynamic_context_count(std::exchange(other.m_dynamic_context_count, static_cast<uint8_t>(0))),
- m_total_context_count(std::exchange(other.m_total_context_count, static_cast<uint8_t>(0))),
- m_network_index_map(std::move(other.m_network_index_map)),
- m_latency_meters(std::move(other.m_latency_meters)),
- m_boundary_channels(std::move(other.m_boundary_channels)),
- m_is_configured(std::exchange(other.m_is_configured, false)),
- m_config_channels_ids(std::move(other.m_config_channels_ids))
-{}
-
-hailo_status ResourcesManager::fill_infer_features(CONTROL_PROTOCOL__application_header_t &app_header)
-{
- app_header.infer_features.preliminary_run_asap = m_network_group_metadata->supported_features().preliminary_run_asap;
- return HAILO_SUCCESS;
-}
-
-
-hailo_status ResourcesManager::fill_validation_features(CONTROL_PROTOCOL__application_header_t &app_header)
-{
- static const auto ABBALE_NOT_SUPPORTED = false;
- // TODO: fix is_abbale_supported
- // auto proto_message = hef.pimpl.proto_message();
- // auto has_included_features = proto_message->has_included_features();
- // if (has_included_features) {
- // is_abbale_supported = proto_message->included_features().abbale();
- // }
- app_header.validation_features.is_abbale_supported = ABBALE_NOT_SUPPORTED;
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::fill_network_batch_size(CONTROL_PROTOCOL__application_header_t &app_header)
-{
- app_header.networks_count = static_cast<uint8_t>(m_config_params.network_params_by_name.size());
- for (const auto &network_pair : m_config_params.network_params_by_name) {
- auto network_name_from_params = network_pair.first;
- uint8_t network_index = 0;
- for (network_index = 0; network_index < m_network_index_map.size(); network_index++) {
- auto const network_name_from_map = m_network_index_map[network_index];
- if (network_name_from_map == network_name_from_params) {
- auto batch_size = get_network_batch_size(network_name_from_params);
- CHECK_EXPECTED_AS_STATUS(batch_size);
- app_header.batch_size[network_index] = batch_size.value();
- break;
- }
- }
- if (m_network_index_map.size() == network_index) {
- LOGGER__ERROR("Failed to find network with network name {}", network_name_from_params);
- return HAILO_NOT_FOUND;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::create_internal_vdma_channels()
-{
- auto internal_channel_ids = m_channel_allocator.get_internal_channel_ids();
-
- m_internal_channels.reserve(internal_channel_ids.size());
- for (const auto &ch : internal_channel_ids) {
- auto direction = (ch.channel_index < MIN_D2H_CHANNEL_INDEX) ? VdmaChannel::Direction::H2D : VdmaChannel::Direction::D2H;
- auto vdma_channel = VdmaChannel::create(ch, direction, m_driver, m_vdma_device.get_default_desc_page_size());
- CHECK_EXPECTED_AS_STATUS(vdma_channel);
- m_internal_channels.emplace_back(vdma_channel.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::create_boundary_vdma_channel(const LayerInfo &layer_info)
-{
- // TODO: put in layer info
- const auto channel_direction = layer_info.direction == HAILO_H2D_STREAM ? VdmaChannel::Direction::H2D :
- VdmaChannel::Direction::D2H;
- const auto channel_id = get_available_channel_id(to_layer_identifier(layer_info),
- channel_direction, layer_info.dma_engine_index);
- CHECK_EXPECTED_AS_STATUS(channel_id);
-
- auto network_batch_size = get_network_batch_size(layer_info.network_name);
- CHECK_EXPECTED_AS_STATUS(network_batch_size);
-
- uint32_t min_active_trans = MIN_ACTIVE_TRANSFERS_SCALE * network_batch_size.value();
- uint32_t max_active_trans = MAX_ACTIVE_TRANSFERS_SCALE * network_batch_size.value();
-
- CHECK(IS_FIT_IN_UINT16(min_active_trans), HAILO_INVALID_ARGUMENT,
- "calculated min_active_trans for vdma descriptor list is out of UINT16 range");
- CHECK(IS_FIT_IN_UINT16(max_active_trans), HAILO_INVALID_ARGUMENT,
- "calculated min_active_trans for vdma descriptor list is out of UINT16 range");
-
- auto latency_meter = (contains(m_latency_meters, layer_info.network_name)) ? m_latency_meters.at(layer_info.network_name) : nullptr;
-
- /* TODO - HRT-6829- page_size should be calculated inside the vDMA channel class create function */
- const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
- layer_info.nn_stream_config.core_buffers_per_frame);
- auto desc_sizes_pair = VdmaDescriptorList::get_desc_buffer_sizes_for_single_transfer(m_driver,
- static_cast<uint16_t>(min_active_trans), static_cast<uint16_t>(max_active_trans), transfer_size);
- CHECK_EXPECTED_AS_STATUS(desc_sizes_pair);
-
- const auto page_size = desc_sizes_pair->first;
- const auto descs_count = desc_sizes_pair->second;
- auto channel = VdmaChannel::create(channel_id.value(), channel_direction, m_driver, page_size,
- layer_info.name, latency_meter, network_batch_size.value());
- CHECK_EXPECTED_AS_STATUS(channel);
- const auto status = channel->allocate_resources(descs_count);
- CHECK_SUCCESS(status);
-
- auto channel_ptr = make_shared_nothrow<VdmaChannel>(channel.release());
- CHECK_NOT_NULL(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- m_boundary_channels.emplace(layer_info.name, channel_ptr);
- return HAILO_SUCCESS;
-}
-
-Expected<std::shared_ptr<VdmaChannel>> ResourcesManager::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
-{
- auto boundary_channel_it = m_boundary_channels.find(stream_name);
- if (std::end(m_boundary_channels) == boundary_channel_it) {
- return make_unexpected(HAILO_NOT_FOUND);
- }
-
- return std::shared_ptr<VdmaChannel>(boundary_channel_it->second);
-}
-
-Expected<std::shared_ptr<const VdmaChannel>> ResourcesManager::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) const
-{
- auto boundary_channel_it = m_boundary_channels.find(stream_name);
- if (std::end(m_boundary_channels) == boundary_channel_it) {
- return make_unexpected(HAILO_NOT_FOUND);
- }
-
- return std::shared_ptr<const VdmaChannel>(boundary_channel_it->second);
-}
-
-hailo_power_mode_t ResourcesManager::get_power_mode() const
-{
- return m_config_params.power_mode;
-}
-
-ExpectedRef<InterContextBuffer> ResourcesManager::create_inter_context_buffer(uint32_t transfer_size,
- uint8_t src_stream_index, uint8_t src_context_index, const std::string &network_name)
-{
- auto network_batch_size_exp = get_network_batch_size(network_name);
- CHECK_EXPECTED(network_batch_size_exp);
- auto network_batch_size = network_batch_size_exp.value();
-
- auto buffer = InterContextBuffer::create(m_driver, transfer_size, network_batch_size);
- CHECK_EXPECTED(buffer);
-
- const auto key = std::make_pair(src_context_index, src_stream_index);
- auto emplace_res = m_inter_context_buffers.emplace(key, buffer.release());
- return std::ref(emplace_res.first->second);
-}
-
-ExpectedRef<InterContextBuffer> ResourcesManager::get_inter_context_buffer(const IntermediateBufferKey &key)
-{
- auto buffer_it = m_inter_context_buffers.find(key);
- if (std::end(m_inter_context_buffers) == buffer_it) {
- return make_unexpected(HAILO_NOT_FOUND);
- }
-
- return std::ref(buffer_it->second);
-}
-
-Expected<CONTROL_PROTOCOL__application_header_t> ResourcesManager::get_control_network_group_header()
-{
- CONTROL_PROTOCOL__application_header_t app_header{};
- app_header.dynamic_contexts_count = m_dynamic_context_count;
-
- auto status = fill_infer_features(app_header);
- CHECK_SUCCESS_AS_EXPECTED(status, "Invalid infer features");
- status = fill_validation_features(app_header);
- CHECK_SUCCESS_AS_EXPECTED(status, "Invalid validation features");
- status = fill_network_batch_size(app_header);
- CHECK_SUCCESS_AS_EXPECTED(status, "Invalid network batch sizes");
-
- return app_header;
-}
-
-Expected<std::reference_wrapper<ContextResources>> ResourcesManager::add_new_context(CONTROL_PROTOCOL__context_switch_context_type_t type,
- const ConfigBufferInfoMap &config_info)
-{
- CHECK_AS_EXPECTED(m_total_context_count < std::numeric_limits<uint8_t>::max(), HAILO_INVALID_CONTEXT_COUNT);
-
- auto context_resources = ContextResources::create(m_driver, type, m_config_channels_ids, config_info);
- CHECK_EXPECTED(context_resources);
-
- m_contexts_resources.emplace_back(context_resources.release());
- m_total_context_count++;
- if (CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC == type) {
- m_dynamic_context_count++;
- }
-
- return std::ref(m_contexts_resources.back());
-}
-
-Expected<vdma::ChannelId> ResourcesManager::get_available_channel_id(const LayerIdentifier &layer_identifier,
- VdmaChannel::Direction direction, uint8_t engine_index)
-{
- if (m_driver.dma_type() == HailoRTDriver::DmaType::PCIE) {
- // On PCIe we have only 1 engine. To support the same HEF with both PCIe and DRAM, we use default engine here
- engine_index = vdma::DEFAULT_ENGINE_INDEX;
- }
-
- return m_channel_allocator.get_available_channel_id(layer_identifier, direction, engine_index);
-}
-
-hailo_status ResourcesManager::free_channel_index(const LayerIdentifier &layer_identifier)
-{
- return m_channel_allocator.free_channel_index(layer_identifier);
-}
-
-Expected<hailo_stream_interface_t> ResourcesManager::get_default_streams_interface()
-{
- return m_vdma_device.get_default_streams_interface();
-}
-
-hailo_status ResourcesManager::register_fw_managed_vdma_channels()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- for (auto &ch : m_internal_channels) {
- status = ch.register_fw_controlled_channel();
- CHECK_SUCCESS(status);
- }
-
- for (auto &ch : m_boundary_channels) {
- status = ch.second->register_fw_controlled_channel();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::unregister_fw_managed_vdma_channels()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- // Note: We don't "unregister" the m_boundary_channels here, beacuse the Vdma*Stream objects will unregister their
- // own channels.
- // TODO: Add one icotl to stop all channels at once (HRT-6097)
- for (auto &ch : m_internal_channels) {
- status = ch.unregister_fw_controlled_channel();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::set_inter_context_channels_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
- for (auto &key_buff_pair : m_inter_context_buffers) {
- const auto status = key_buff_pair.second.reprogram(dynamic_batch_size);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<uint16_t> ResourcesManager::get_network_batch_size(const std::string &network_name) const
-{
- for (auto const &network_map : m_config_params.network_params_by_name) {
- auto const network_name_from_params = network_map.first;
- if (network_name_from_params == network_name) {
- auto actual_batch_size = network_map.second.batch_size;
- if (HAILO_DEFAULT_BATCH_SIZE == actual_batch_size) {
- actual_batch_size = DEFAULT_ACTUAL_BATCH_SIZE;
- }
- return actual_batch_size;
- }
- }
-
- LOGGER__ERROR("Failed to find network with network name {}", network_name);
-
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-Expected<Buffer> ResourcesManager::read_intermediate_buffer(const IntermediateBufferKey &key)
-{
- auto inter_context_buffer_it = m_inter_context_buffers.find(key);
- if (std::end(m_inter_context_buffers) != inter_context_buffer_it) {
- return inter_context_buffer_it->second.read();
- }
-
- const auto dynamic_context_index = key.first;
- const size_t context_index = dynamic_context_index + CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS;
- CHECK_AS_EXPECTED(context_index < m_contexts_resources.size(), HAILO_NOT_FOUND, "Context index {} out of range",
- dynamic_context_index);
- const auto d2h_stream_index = key.second;
- if (auto ddr_channels_pair = m_contexts_resources[context_index].get_ddr_channels_pair(d2h_stream_index)) {
- return ddr_channels_pair->get().read();
- }
-
- LOGGER__ERROR("Failed to find intermediate buffer for src_context {}, src_stream_index {}", key.first,
- key.second);
- return make_unexpected(HAILO_NOT_FOUND);
-
-}
-
-hailo_status ResourcesManager::configure()
-{
- CHECK(!m_is_configured, HAILO_INTERNAL_FAILURE, "Can't configure the same network group twice");
- m_is_configured = true;
-
- auto net_group_header = get_control_network_group_header();
- CHECK_EXPECTED_AS_STATUS(net_group_header);
-
- auto status = Control::context_switch_set_network_group_header(m_vdma_device, net_group_header.release());
- CHECK_SUCCESS(status);
-
- for (const auto &context : m_contexts_resources) {
- status = Control::context_switch_set_context_info(m_vdma_device, context.get_controls());
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ResourcesManager::enable_state_machine(uint16_t dynamic_batch_size)
-{
- return Control::enable_network_group(m_vdma_device, m_net_group_index, dynamic_batch_size);
-}
-
-hailo_status ResourcesManager::reset_state_machine(bool keep_nn_config_during_reset)
-{
- auto status = Control::reset_context_switch_state_machine(m_vdma_device, keep_nn_config_during_reset);
- CHECK_SUCCESS(status);
-
- if (!keep_nn_config_during_reset && (Device::Type::CORE == m_vdma_device.get_type())) {
- // On core device, the nn_manager is not responsible to reset the nn-core so
- // we use the SCU control for that.
- status = m_vdma_device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file resource_manager_builder.cpp
- * @brief Builds a ResourcesManager object for the given CoreOp.
- **/
-
-#include "resource_manager_builder.hpp"
-#include "control.hpp"
-
-namespace hailort
-{
-
-
-static uint16_t calculate_periph_buffers_per_frame(const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
- uint16_t min_periph_buffers_per_frame, uint32_t frame_size, uint16_t periph_buffers_per_frame)
-{
- const auto max_periph_buffers_per_frame = MIN(frame_size, static_cast<uint32_t>(hw_consts.max_periph_buffers_per_frame));
- // Fifo copies FIFO_WORD_GRANULARITY_IN_BYTES each time from/to the fifo
- const uint32_t frame_size_words_count = frame_size / hw_consts.fifo_word_granularity_bytes;
- // Look for the highest periph_bytes_per_buffer (frame_size / periph_buffers_per_frame) that is a multiple of FIFO_WORD_GRANULARITY_IN_BYTES
- for (uint16_t i = min_periph_buffers_per_frame; i < max_periph_buffers_per_frame; i++) {
- // (0 == (frame_size_words_count % i) ensures periph_bytes_per_buffer will be a multiple of FIFO_WORD_GRANULARITY_IN_BYTES
- if ((0 == (frame_size_words_count % i)) && (hw_consts.max_periph_bytes_per_buffer >= (frame_size / i))) {
- return i;
- }
- }
-
- // Fallback to frame_size unless it exceeds MAX_PERIPH_BUFFERS_PER_FRAME
- if (hw_consts.max_periph_buffers_per_frame < frame_size) {
- return periph_buffers_per_frame;
- } else {
- return static_cast<uint16_t>(frame_size);
- }
-}
-
-static hailo_status calculate_credit_params(const CONTROL_PROTOCOL__hw_consts_t &hw_consts, uint16_t desc_page_size,
- hailo_stream_direction_t direction, bool should_optimize_credits, uint16_t *periph_bytes_per_buffer,
- uint16_t *periph_buffers_per_frame)
-{
- // Next parameters differ between RX and TX
-
- auto local_periph_bytes_per_buffer = (*periph_bytes_per_buffer);
- auto local_periph_buffers_per_frame = (*periph_buffers_per_frame);
- uint32_t periph_frame_size = (*periph_bytes_per_buffer) * (*periph_buffers_per_frame);
- const auto max_bytes_per_buffer = MAX(hw_consts.max_acceptable_bytes_per_buffer, (*periph_bytes_per_buffer));
-
- if (0 != (local_periph_bytes_per_buffer % hw_consts.fifo_word_granularity_bytes)) {
- return HAILO_INTERNAL_FAILURE;
- }
-
- if (should_optimize_credits) {
- // If credits optimizations flag is on, assuming periph_buffers_per_frame * periph_bytes_per_buffer == periph_frame_size
- // Find the lowest periph_buffers_per_frame that divides periph_frame_size and is bigger than periph_frame_size / max_bytes_per_buffer
- // Also, periph_bytes_per_buffer must be a multiple of 8
- const auto min_periph_buffers_per_frame = DIV_ROUND_UP(periph_frame_size, max_bytes_per_buffer);
- local_periph_buffers_per_frame = calculate_periph_buffers_per_frame(hw_consts, static_cast<uint16_t>(min_periph_buffers_per_frame),
- periph_frame_size, local_periph_buffers_per_frame);
- assert(IS_FIT_IN_UINT16(periph_frame_size / local_periph_buffers_per_frame));
- local_periph_bytes_per_buffer = static_cast<uint16_t>(periph_frame_size / local_periph_buffers_per_frame); // Must be integer according to last function
- }
- // Periph credits size must be lower than the following value to make sure that the credit size allows
- // for at least desc_page_size bytes left in the FIFO for the last descriptor in the pattern
- if ((direction == HAILO_D2H_STREAM) &&
- (static_cast<uint32_t>(local_periph_bytes_per_buffer) > (hw_consts.outbound_data_stream_size - 8 - desc_page_size))) {
- LOGGER__ERROR("Current periph_bytes_per_buffer is {} which is too high. Exiting.", local_periph_bytes_per_buffer);
- return HAILO_INTERNAL_FAILURE;
- }
-
- *periph_bytes_per_buffer = local_periph_bytes_per_buffer;
- *periph_buffers_per_frame = local_periph_buffers_per_frame;
- return HAILO_SUCCESS;
-}
-
-static Expected<LayerInfo> update_layer_info(const LayerInfo &original_layer_info,
- const CONTROL_PROTOCOL__host_buffer_info_t &buffer_info,
- const CONTROL_PROTOCOL__hw_consts_t &hw_consts, bool should_optimize_credits)
-{
- LayerInfo local_layer_info = original_layer_info;
-
- auto status = calculate_credit_params(hw_consts, buffer_info.desc_page_size, local_layer_info.direction,
- should_optimize_credits, &local_layer_info.nn_stream_config.periph_bytes_per_buffer,
- &local_layer_info.nn_stream_config.periph_buffers_per_frame);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- if (local_layer_info.max_shmifo_size == 0) {
- local_layer_info.max_shmifo_size = hw_consts.default_initial_credit_size;
- }
-
- return local_layer_info;
-}
-
-static hailo_status fill_boundary_input_layer(ContextResources &context_resources,
- ResourcesManager &resources_manager, const LayerInfo layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
- bool should_optimize_credits)
-{
- const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
- layer_info.nn_stream_config.core_buffers_per_frame);
-
- auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel);
-
- auto buffer_info = vdma_channel.value()->get_boundary_buffer_info(transfer_size);
- CHECK_EXPECTED_AS_STATUS(buffer_info);
-
- auto local_layer_info = update_layer_info(layer_info, *buffer_info, hw_consts, should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- BoundaryEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = vdma_channel.value()->get_channel_id();
- edge_layer.buffer_info = buffer_info.value();
- context_resources.add_edge_layer(edge_layer);
-
- LOGGER__DEBUG("Boundary input stream: {} h2d_channel: {}.", layer_info.stream_index, edge_layer.channel_id);
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_inter_context_input_layer(ContextResources &context_resources,
- ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
- bool should_optimize_credits)
-{
- const auto channel_id = resources_manager.get_available_channel_id(to_layer_identifier(layer_info),
- VdmaChannel::Direction::H2D, layer_info.dma_engine_index);
- CHECK_EXPECTED_AS_STATUS(channel_id);
-
- /* Get inter context buffer previously created */
- const auto &connected_context = layer_info.connected_context_info;
- auto intermediate_buffer_key = std::make_pair(connected_context.context_index, connected_context.stream_index);
- auto inter_context_buffer_exp = resources_manager.get_inter_context_buffer(intermediate_buffer_key);
- CHECK_EXPECTED_AS_STATUS(inter_context_buffer_exp, "Failed to find inter context buffer for src context {}, src_stream_index {}",
- connected_context.context_index, connected_context.stream_index);
- auto &inter_context_buffer = inter_context_buffer_exp->get();
-
- auto local_layer_info = update_layer_info(layer_info, inter_context_buffer.get_host_buffer_info(), hw_consts,
- should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- InterContextEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = channel_id.value();
- edge_layer.buffer_info = inter_context_buffer.get_host_buffer_info();
- context_resources.add_edge_layer(edge_layer);
-
- LOGGER__DEBUG("Intermediate input stream {}, src_context:{}, dst_context: {}, h2d_channel {}.",
- layer_info.stream_index, layer_info.context_index, layer_info.connected_context_info.context_index,
- channel_id.value());
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_boundary_output_layer(ContextResources &context_resources,
- ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
- bool should_optimize_credits)
-{
- const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
- layer_info.nn_stream_config.core_buffers_per_frame);
-
- auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel);
-
- auto buffer_info = vdma_channel.value()->get_boundary_buffer_info(transfer_size);
- CHECK_EXPECTED_AS_STATUS(buffer_info);
-
- auto local_layer_info = update_layer_info(layer_info, *buffer_info, hw_consts, should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- BoundaryEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = vdma_channel.value()->get_channel_id();
- edge_layer.buffer_info = buffer_info.value();
- context_resources.add_edge_layer(edge_layer);
-
- LOGGER__DEBUG("Boundary output stream: {} d2h_channel: {}.", layer_info.stream_index, edge_layer.channel_id);
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_inter_context_output_layer(ContextResources &context_resources,
- ResourcesManager &resources_manager, const LayerInfo &layer_info,
- const CONTROL_PROTOCOL__hw_consts_t &hw_consts, bool should_optimize_credits)
-{
- const auto channel_id = resources_manager.get_available_channel_id(to_layer_identifier(layer_info),
- VdmaChannel::Direction::D2H, layer_info.dma_engine_index);
- CHECK_EXPECTED_AS_STATUS(channel_id);
-
- const auto frame_credits_in_bytes = (layer_info.nn_stream_config.periph_bytes_per_buffer *
- layer_info.nn_stream_config.core_buffers_per_frame);
-
- auto inter_context_buffer_exp = resources_manager.create_inter_context_buffer(frame_credits_in_bytes,
- layer_info.stream_index, layer_info.context_index, layer_info.network_name);
- CHECK_EXPECTED_AS_STATUS(inter_context_buffer_exp);
- auto &inter_context_buffer = inter_context_buffer_exp->get();
-
- auto local_layer_info = update_layer_info(layer_info, inter_context_buffer.get_host_buffer_info(), hw_consts,
- should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- InterContextEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = channel_id.value();
- edge_layer.buffer_info = inter_context_buffer.get_host_buffer_info();
- context_resources.add_edge_layer(edge_layer);
-
- LOGGER__DEBUG("Inter-context output stream {}, src_context:{}, d2h_channel {}.",
- layer_info.stream_index, layer_info.context_index, channel_id.value());
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_ddr_output_layer(ContextResources &context_resources,
- ResourcesManager &resources_manager, const LayerInfo &layer_info,
- const CONTROL_PROTOCOL__hw_consts_t &hw_consts)
-{
- CHECK(resources_manager.get_supported_features().padded_ddr_buffers, HAILO_INVALID_HEF,
- "Failed opening non-compatible HEF that uses the following deprecated features: host-managed DDR buffers."
- "Please re-compile the HEF using a newer Dataflow Compiler version (v3.11.0 or newer)");
- // Allocate resources and prepare ddr_info
-
- DdrChannelsInfo ddr_pair_info = {};
- ddr_pair_info.h2d_stream_index = layer_info.connected_context_info.stream_index;
- ddr_pair_info.d2h_stream_index = layer_info.stream_index;
- ddr_pair_info.network_index = layer_info.network_index;
-
- // It is assumed that output channels are parsed before input channels.
- // Allocate vdma channel index for both edges
- const auto h2d_layer_identifier = std::make_tuple(LayerType::DDR, layer_info.name, ddr_pair_info.h2d_stream_index);
- const auto h2d_channel_id = resources_manager.get_available_channel_id(h2d_layer_identifier,
- VdmaChannel::Direction::H2D, layer_info.connected_context_info.dma_engine_index);
- CHECK_EXPECTED_AS_STATUS(h2d_channel_id);
- ddr_pair_info.h2d_channel_id = h2d_channel_id.value();
-
- const auto d2h_layer_identifier = std::make_tuple(LayerType::DDR, layer_info.name, ddr_pair_info.d2h_stream_index);
- const auto d2h_channel_id = resources_manager.get_available_channel_id(d2h_layer_identifier,
- VdmaChannel::Direction::D2H, layer_info.dma_engine_index);
- CHECK_EXPECTED_AS_STATUS(d2h_channel_id);
- ddr_pair_info.d2h_channel_id = d2h_channel_id.value();
-
- ddr_pair_info.row_size = layer_info.nn_stream_config.core_bytes_per_buffer;
- ddr_pair_info.min_buffered_rows = layer_info.ddr_info.min_buffered_rows;
- ddr_pair_info.total_buffers_per_frame = layer_info.ddr_info.total_buffers_per_frame;
-
- // Create the ddr buffer
- auto ddr_channels_pair = context_resources.create_ddr_channels_pair(ddr_pair_info);
- CHECK_EXPECTED_AS_STATUS(ddr_channels_pair);
-
- // On ddr layers, we assume the periph credit size is aligned to the size of descriptor, so we don't want to
- // optimize the credits.
- const bool should_optimize_credits = false;
- auto local_layer_info = update_layer_info(layer_info, ddr_channels_pair->get().get_host_buffer_info(), hw_consts,
- should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- DdrChannelEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = ddr_pair_info.d2h_channel_id;
- edge_layer.buffer_info = ddr_channels_pair->get().get_host_buffer_info();
- context_resources.add_edge_layer(edge_layer);
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_ddr_input_layer(ContextResources &context_resources,
- const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts)
-{
- auto connected_stream_index = layer_info.connected_context_info.stream_index;
- auto ddr_channels_pair = context_resources.get_ddr_channels_pair(connected_stream_index);
- CHECK(ddr_channels_pair, HAILO_INVALID_HEF, "Matching DDR layer as not found for context {} src stream {}",
- layer_info.context_index, connected_stream_index);
-
- const auto ddr_info = ddr_channels_pair->get().info();
- LOGGER__DEBUG("DDR layer: input stream_index: {}, output stream_index: {}, h2d_channel {}, d2h_channel: {}.",
- ddr_info.h2d_stream_index, ddr_info.d2h_stream_index, ddr_info.h2d_channel_id, ddr_info.d2h_channel_id);
-
- CHECK(layer_info.stream_index == ddr_info.h2d_stream_index, HAILO_INVALID_HEF, "DDR channel pair mismatch in h2d channel");
- CHECK(layer_info.connected_context_info.stream_index == ddr_info.d2h_stream_index, HAILO_INVALID_HEF, "DDR channel pair mismatch in d2h channel");
- CHECK(layer_info.network_index == ddr_info.network_index, HAILO_INVALID_HEF, "DDR channel pair mismatch network_index");
-
- // On ddr layers, we assume the periph credit size is aligned to the size of descriptor, so we don't want to
- // optimize the credits.
- const bool should_optimize_credits = false;
- auto local_layer_info = update_layer_info(layer_info, ddr_channels_pair->get().get_host_buffer_info(), hw_consts,
- should_optimize_credits);
- CHECK_EXPECTED_AS_STATUS(local_layer_info);
-
- DdrChannelEdgeLayer edge_layer{};
- edge_layer.layer_info = local_layer_info.release();
- edge_layer.channel_id = ddr_info.h2d_channel_id;
- edge_layer.buffer_info = ddr_channels_pair->get().get_host_buffer_info();
- context_resources.add_edge_layer(edge_layer);
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status add_ddr_buffers_info(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
- const ContextResources &context_resources)
-{
- bool start_fw_ddr_buffer_task = false;
- for (auto& ddr_channels_pair : context_resources.get_ddr_channels_pairs()) {
- if (ddr_channels_pair.need_manual_credit_management()) {
- const auto ddr_info = ddr_channels_pair.info();
- auto ddr_pair_action = DdrPairInfoAction::create(ddr_info.h2d_channel_id, ddr_info.d2h_channel_id,
- ddr_info.network_index, ddr_channels_pair.descriptors_per_frame(), ddr_channels_pair.descs_count());
- CHECK_EXPECTED_AS_STATUS(ddr_pair_action);
- configuration_actions.emplace_back(ddr_pair_action.release());
-
- start_fw_ddr_buffer_task = true;
- }
- }
-
- if (start_fw_ddr_buffer_task) {
- auto start_ddr_buffering_action = StartDdrBufferingTaskAction::create();
- CHECK_EXPECTED_AS_STATUS(start_ddr_buffering_action);
- configuration_actions.emplace_back(start_ddr_buffering_action.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status parse_and_fill_edge_layers_mapping(
- ContextResources &context_resources,
- const ContextMetadata &context_metadata,
- ResourcesManager &resources_manager)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- auto hw_consts = Control::get_hw_consts(resources_manager.get_device());
- CHECK_EXPECTED_AS_STATUS(hw_consts);
- const bool should_optimize_credits = hw_consts->should_optimize_credits &&
- (HAILO_POWER_MODE_PERFORMANCE == resources_manager.get_power_mode());
-
- // Parse the edge layer by order - first output edge layers, then ddr inputs and only then the input edge layers
- // In order to insure that input data can enter the chip only after all other elements are configured.
- // We parse ddr inputs before boundary/inter-context because otherwise on C2C mode we may lose some credit.
-
- for (const auto &output_layer_info : context_metadata.get_ddr_output_layers()) {
- status = fill_ddr_output_layer(context_resources, resources_manager, output_layer_info, *hw_consts);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &output_layer_info : context_metadata.get_boundary_output_layers()) {
- status = fill_boundary_output_layer(context_resources, resources_manager, output_layer_info,
- *hw_consts, should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &output_layer_info : context_metadata.get_inter_context_output_layers()) {
- status = fill_inter_context_output_layer(context_resources, resources_manager, output_layer_info,
- *hw_consts, should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &input_layer_info : context_metadata.get_ddr_input_layers()) {
- status = fill_ddr_input_layer(context_resources, input_layer_info, *hw_consts);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &input_layer_info : context_metadata.get_boundary_input_layers()) {
- status = fill_boundary_input_layer(context_resources, resources_manager, input_layer_info,
- *hw_consts, should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &input_layer_info : context_metadata.get_inter_context_input_layers()) {
- status = fill_inter_context_input_layer(context_resources, resources_manager, input_layer_info,
- *hw_consts, should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- status = context_resources.validate_edge_layers();
- CHECK_SUCCESS(status);
-
- /* UN-Lock resources at the end of the context -
- h2d inter-context, d2h inter-context and DDR buffer channels */
- for (const auto &input_layer_info : context_metadata.get_inter_context_input_layers()) {
- status = resources_manager.free_channel_index(to_layer_identifier(input_layer_info));
- CHECK_SUCCESS(status);
- }
-
- for (const auto &output_layer_info : context_metadata.get_inter_context_output_layers()) {
- status = resources_manager.free_channel_index(to_layer_identifier(output_layer_info));
- CHECK_SUCCESS(status);
- }
-
- for (const auto &output_layer_info : context_metadata.get_ddr_output_layers()) {
- const auto h2d_layer_identifier = std::make_tuple(LayerType::DDR, output_layer_info.name,
- output_layer_info.connected_context_info.stream_index);
- status = resources_manager.free_channel_index(h2d_layer_identifier);
- CHECK_SUCCESS(status);
-
- const auto d2h_layer_identifier = std::make_tuple(LayerType::DDR, output_layer_info.name,
- output_layer_info.stream_index);
- status = resources_manager.free_channel_index(d2h_layer_identifier);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-// Returns pairs of form [start, end] (inclusive) of repeated 'ContextSwitchConfigAction's in the given vector
-static std::vector<std::pair<uint32_t, uint32_t>> get_repreated_actions_boundary_indices(
- const std::vector<ContextSwitchConfigActionPtr> &actions)
-{
- const uint32_t num_actions = static_cast<uint32_t>(actions.size());
-
- std::vector<std::pair<uint32_t, uint32_t>> repeated_indexes;
- uint32_t start_index = 0;
- while (start_index < num_actions) {
- auto end_index = start_index + 1;
- do
- {
- if (end_index == num_actions) {
- break;
- }
- if (actions[start_index]->get_type() != actions[end_index]->get_type()) {
- break;
- }
- end_index++;
- } while (true);
-
- repeated_indexes.emplace_back(start_index, end_index - 1);
- start_index = end_index;
- }
-
- return repeated_indexes;
-}
-
-// Returns a map from start indexes of repeated actions to the size of the chunk (number of repeated actions)
-static std::map<uint32_t, uint8_t> get_start_indexes_of_repeated_actions(
- const std::vector<ContextSwitchConfigActionPtr> &actions,
- const std::vector<std::pair<uint32_t, uint32_t>> &repeated_indexes,
- // TODO: get this from HardCoded config (HRT-5352)
- const std::set<ContextSwitchConfigAction::Type> &action_types_denylist = {})
-{
- std::map<uint32_t, uint8_t> result;
- for (const auto &index_pair : repeated_indexes) {
- if (!actions[index_pair.first]->supports_repeated_block()) {
- continue;
- }
-
- if (contains(action_types_denylist, actions[index_pair.first]->get_type())) {
- continue;
- }
-
- // TODO: Move merge calculation to HRT-5352
- // Merge calculation (see also - CONTEXT_SWITCH_DEFS__repeated_action_header_t in common/include/context_switch_defs.h):
- // * Assume there are x repeated actions that can be merged
- // * Let a := sizeof(action_to_be_merged) [without CONTEXT_SWITCH_DEFS__common_action_header_t]
- // * sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) is 5
- // * sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) is 3
- // Then:
- // * original_size = x * (5 + a) = 5x + ax
- // * new_size = 5 + 3 + ax = 8 + ax
- // * new_size < original_size <=> 8 + ax < 5x + ax <=> 8 < 5x <=> 1.6 < x
- // Hence we merge for x >= 2
- static_assert(sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) == 5,
- "Merge calculation assumes that 'sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) == 5'");
- static_assert(sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) == 3,
- "Merge calculation assumes that 'sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) == 3'");
- static const uint32_t MIN_REQUIRED_FOR_MERGING = 2;
-
- uint32_t start_index = index_pair.first;
- const uint32_t end_index = index_pair.second;
- while (start_index < end_index) {
- const auto curr_chunk_size = static_cast<uint8_t>(std::min(
- static_cast<uint32_t>(std::numeric_limits<uint8_t>::max()),
- end_index - start_index + 1));
- if (curr_chunk_size < MIN_REQUIRED_FOR_MERGING) {
- break;
- }
-
- result.emplace(start_index, curr_chunk_size);
-
- start_index += curr_chunk_size;
- }
- }
-
- return result;
-}
-
-static std::set<std::pair<uint32_t, uint32_t>> get_indexes_of_action_type(
- const std::vector<ContextSwitchConfigActionPtr> &actions,
- const std::vector<std::pair<uint32_t, uint32_t>> &repeated_indexes,
- const ContextSwitchConfigAction::Type &required_action_type)
-{
- std::set<std::pair<uint32_t, uint32_t>> result;
- for (const auto &index_pair : repeated_indexes) {
- const auto curr_action_type = actions[index_pair.first]->get_type();
- if (required_action_type != curr_action_type) {
- continue;
- }
-
- result.emplace(index_pair);
- }
-
- return result;
-}
-
-static std::set<uint32_t> get_end_indexes_of_action_type(
- const std::vector<ContextSwitchConfigActionPtr> &actions,
- const std::vector<std::pair<uint32_t, uint32_t>> &repeated_indexes,
- const ContextSwitchConfigAction::Type &required_action_type)
-{
- std::set<uint32_t> result;
- for (const auto &index_pair : get_indexes_of_action_type(actions, repeated_indexes, required_action_type)) {
- result.insert(index_pair.second);
- }
-
- return result;
-}
-
-static hailo_status push_fetch_config_actions(
- std::vector<ConfigBuffer> &config_resources, const std::set<uint8_t> &pending_config_stream_indexes,
- std::vector<uint16_t> &total_ccw_bursts, const bool support_pre_fetch,
- std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
-{
- CHECK(total_ccw_bursts.size() == config_resources.size(), HAILO_INTERNAL_FAILURE, "Invalid cfg channels count");
- for (const auto config_stream_index : pending_config_stream_indexes) {
- CHECK(config_stream_index < config_resources.size(), HAILO_INTERNAL_FAILURE, "Invalid cfg channel index");
-
- if (support_pre_fetch) {
- auto action = AddCcwBurstAction::create(config_stream_index, total_ccw_bursts[config_stream_index]);
- CHECK_EXPECTED_AS_STATUS(action);
- processed_configuration_actions.emplace_back(action.release());
- } else {
- const auto desc_count = config_resources[config_stream_index].program_descriptors();
- CHECK_EXPECTED_AS_STATUS(desc_count);
- CHECK(IS_FIT_IN_UINT16(desc_count.value()), HAILO_INVALID_OPERATION,
- "On cfg with continuous mode, max descriptors size must fit in uint16_t");
-
- auto action = FetchCfgChannelDescriptorsAction::create(config_resources[config_stream_index].channel_id(),
- static_cast<uint16_t>(desc_count.value()));
- CHECK_EXPECTED_AS_STATUS(action);
- processed_configuration_actions.emplace_back(action.release());
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status write_ccw_to_buffer(ConfigBuffer& config_buffer, const WriteDataCcwAction &ccw_action,
- bool support_pre_fetch)
-{
- const bool is_last_write = config_buffer.size_left() == ccw_action.data().size();
- if (support_pre_fetch && is_last_write) {
- auto status = config_buffer.pad_with_nops();
- CHECK_SUCCESS(status);
- }
-
- auto status = config_buffer.write(ccw_action.data());
- CHECK_SUCCESS(status);
-
- if (support_pre_fetch && is_last_write) {
- auto desc_count = config_buffer.program_descriptors();
- CHECK_EXPECTED_AS_STATUS(desc_count);
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status proccess_write_ccw_action(const ContextSwitchConfigActionPtr &configuration_action,
- std::vector<ConfigBuffer> &config_resources, std::set<uint8_t> &pending_config_stream_indexes,
- std::vector<uint16_t> &total_ccw_bursts, const std::set<uint32_t> &end_indexes_of_write_ccw_actions,
- const uint32_t &action_index, const bool support_pre_fetch,
- std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
-{
- assert(ContextSwitchConfigAction::Type::WriteDataCcw == configuration_action->get_type());
- const auto &write_ccw_action = *static_cast<const WriteDataCcwAction*>(configuration_action.get());
-
- // Add the config stream index of the current WriteDataCcwAction
- const auto config_stream_index = write_ccw_action.config_stream_index();
- pending_config_stream_indexes.insert(config_stream_index);
-
- // TODO: get CCW headers from proto (need to add it into the proto)
- const uint16_t ccw_bursts = 1;
- auto accum_ccw_bursts = total_ccw_bursts[config_stream_index] + ccw_bursts;
- CHECK(IS_FIT_IN_UINT16(accum_ccw_bursts), HAILO_INTERNAL_FAILURE,
- "Failed to parse HEF. action fetch ccw burst supports only to 2^16 bursts.");
- assert(config_stream_index < total_ccw_bursts.size());
- total_ccw_bursts[config_stream_index] = static_cast<uint16_t>(accum_ccw_bursts);
-
- assert(config_stream_index < config_resources.size());
- auto status = write_ccw_to_buffer(config_resources[config_stream_index], write_ccw_action, support_pre_fetch);
- CHECK_SUCCESS(status);
-
- // At the end of a consecutive group of WriteDataCcwActions, we program the
- // descriptors for all the config channels used.
- if (contains(end_indexes_of_write_ccw_actions, action_index)) {
- // Add the last CCW write into the buffer
- processed_configuration_actions.emplace_back(configuration_action);
-
- status = push_fetch_config_actions(config_resources, pending_config_stream_indexes, total_ccw_bursts,
- support_pre_fetch, processed_configuration_actions);
- CHECK_SUCCESS(status);
-
- // Cleanups
- pending_config_stream_indexes.clear();
- for (uint8_t cleanup_ch_index = 0; cleanup_ch_index < total_ccw_bursts.size(); cleanup_ch_index++) {
- total_ccw_bursts[cleanup_ch_index] = 0;
- }
- } else {
- // Add the current action
- processed_configuration_actions.emplace_back(configuration_action);
- }
-
- return HAILO_SUCCESS;
-}
-
-static Expected<uint8_t> find_dummy_stream(const LayerInfo &layer_info, const ContextResources &context_resources)
-{
- // TODO: HRT-8611 use one loop for all edge layers
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- if (edge_layer.layer_info.direction != layer_info.direction) {
- return Expected<uint8_t>(edge_layer.layer_info.stream_index);
- }
- }
- for (const auto &edge_layer : context_resources.get_inter_context_layers()) {
- if (edge_layer.layer_info.direction != layer_info.direction) {
- return Expected<uint8_t>(edge_layer.layer_info.stream_index);
- }
- }
- for (const auto &edge_layer : context_resources.get_ddr_channel_layers()) {
- if (edge_layer.layer_info.direction != layer_info.direction) {
- return Expected<uint8_t>(edge_layer.layer_info.stream_index);
- }
- }
-
- LOGGER__ERROR("Couldn't find dummy stream from context");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-static hailo_status add_change_vdma_to_stream_mapping(
- const NetworkGroupMetadata &network_group_metadata, const ResourcesManager &resources_manager,
- ContextResources &context_resources, uint8_t context_index,
- std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
-{
- for (const auto &layer_info : network_group_metadata.get_all_layer_infos()) {
- auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
- CHECK_EXPECTED_AS_STATUS(vdma_channel);
-
- const auto channel_id = vdma_channel.value()->get_channel_id();
- const bool is_dummy_stream = layer_info.context_index != context_index;
- uint8_t stream_index = layer_info.stream_index;
- if (is_dummy_stream) {
- auto dummy_stream_index = find_dummy_stream(layer_info, context_resources);
- CHECK_EXPECTED_AS_STATUS(dummy_stream_index);
- stream_index = *dummy_stream_index;
- }
-
- auto action = ChangeVdmaToStreamMapping::create(channel_id, stream_index, is_dummy_stream);
- CHECK_EXPECTED_AS_STATUS(action);
- processed_configuration_actions.emplace_back(action.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status push_edge_layer_activation_actions(
- const ContextResources &context_resources,
- std::vector<ContextSwitchConfigActionPtr> &actions)
-{
- // Activate the edge layer by order - first output edge layers, then ddr inputs and only then the input edge layers
- // In order to insure that input data can enter the chip only after all other elements are configured.
- // We parse ddr inputs before boundary/inter-context because otherwise on C2C mode we may lose some credit.
-
- for (const auto &edge_layer : context_resources.get_ddr_channel_layers(HAILO_D2H_STREAM)) {
- auto activate_action = ActivateDdrOutputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
- edge_layer.layer_info.ddr_info.min_buffered_rows);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_boundary_layers(HAILO_D2H_STREAM)) {
- auto activate_action = ActivateBoundaryOutputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers(HAILO_D2H_STREAM)) {
- auto activate_action = ActivateInterContextOutputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.network_index,
- edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_ddr_channel_layers(HAILO_H2D_STREAM)) {
- const auto d2h_stream_index = edge_layer.layer_info.connected_context_info.stream_index;
- auto pair = context_resources.get_ddr_channels_pair(d2h_stream_index);
- CHECK_EXPECTED_AS_STATUS(pair);
- const auto d2h_channel_id = pair->get().info().d2h_channel_id;
-
- auto activate_action = ActivateDdrInputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
- edge_layer.layer_info.max_shmifo_size, d2h_channel_id);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_boundary_layers(HAILO_H2D_STREAM)) {
- auto activate_action = ActivateBoundaryInputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
- edge_layer.layer_info.max_shmifo_size);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers(HAILO_H2D_STREAM)) {
- auto activate_action = ActivateInterContextInputChannelAction::create(edge_layer.channel_id,
- edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
- edge_layer.layer_info.max_shmifo_size);
- CHECK_EXPECTED_AS_STATUS(activate_action);
- actions.emplace_back(activate_action.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status proccess_trigger_new_data_input_action(const ContextSwitchConfigActionPtr &configuration_action,
- uint32_t trigger_new_data_from_input_group_start,
- uint32_t trigger_new_data_from_input_group_end,
- const uint32_t &action_index,
- const NetworkGroupMetadata &network_group_metadata,
- const ResourcesManager &resources_manager,
- ContextResources &context_resources,
- uint8_t context_index,
- std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions, bool is_single_context)
-{
- if (trigger_new_data_from_input_group_start == action_index) {
- auto status = push_edge_layer_activation_actions(context_resources, processed_configuration_actions);
- CHECK_SUCCESS(status);
-
- if (!is_single_context) {
- status = add_change_vdma_to_stream_mapping(network_group_metadata, resources_manager,
- context_resources, context_index, processed_configuration_actions);
- CHECK_SUCCESS(status);
- }
-
- // DDR buffer info actions need to happen after the edge layer activation actions.
- status = add_ddr_buffers_info(processed_configuration_actions, context_resources);
- CHECK_SUCCESS(status);
- }
-
- // Add the current action
- processed_configuration_actions.emplace_back(configuration_action);
-
- // At the end of a consecutive group of TriggerNewDataFromDataInput actions, we can trigger the BurstCreditsTask
- // in the FW, via StartBurstCreditsTaskAction.
- if (trigger_new_data_from_input_group_end == action_index) {
- auto start_burst_credits_task_action = StartBurstCreditsTaskAction::create();
- CHECK_EXPECTED_AS_STATUS(start_burst_credits_task_action);
- processed_configuration_actions.emplace_back(start_burst_credits_task_action.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-// At the end of each consecutive group of WriteDataCcwAction, a FetchCfgChannelDescriptorsAction is added.
-static hailo_status add_fetch_config_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
- std::vector<ConfigBuffer> &config_resources, bool support_pre_fetch)
-{
- const auto repeated_indexes = get_repreated_actions_boundary_indices(configuration_actions);
- const auto end_indexes_of_write_ccws = get_end_indexes_of_action_type(configuration_actions,
- repeated_indexes, ContextSwitchConfigAction::Type::WriteDataCcw);
-
- std::set<uint8_t> pending_config_stream_indexes;
- std::vector<uint16_t> total_ccw_bursts(config_resources.size(), 0);
- std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
- for (uint32_t action_index = 0; action_index < configuration_actions.size(); action_index++) {
- const auto &configuration_action = configuration_actions[action_index];
- if (ContextSwitchConfigAction::Type::WriteDataCcw == configuration_action->get_type()) {
- auto status = proccess_write_ccw_action(configuration_action, config_resources, pending_config_stream_indexes,
- total_ccw_bursts, end_indexes_of_write_ccws, action_index, support_pre_fetch, processed_configuration_actions);
- CHECK_SUCCESS(status);
- } else {
- // Add the current action
- processed_configuration_actions.emplace_back(configuration_action);
- }
- }
-
- // Replace the original configuration actions with the processed ones.
- configuration_actions = processed_configuration_actions;
-
- return HAILO_SUCCESS;
-}
-
-// Push activate config channels in the beginning of the context, and deactivation on end of context.
-static hailo_status add_config_channel_activation_actions(std::vector<ContextSwitchConfigActionPtr> &actions,
- const std::vector<ConfigBuffer> &config_resources)
-{
- std::vector<ContextSwitchConfigActionPtr> processed_actions;
- const size_t new_actions_count = 2 * config_resources.size();
- processed_actions.reserve(actions.size() + new_actions_count);
-
- for (uint8_t config_stream_index = 0; config_stream_index < config_resources.size(); config_stream_index++) {
- const auto &config_buffer = config_resources[config_stream_index];
- auto activate_action = ActivateConfigChannelAction::create(config_stream_index, config_buffer.channel_id(),
- config_buffer.get_host_buffer_info());
- CHECK_EXPECTED_AS_STATUS(activate_action);
- processed_actions.push_back(activate_action.release());
- }
-
- processed_actions.insert(processed_actions.end(), actions.begin(), actions.end());
-
- for (uint8_t config_stream_index = 0; config_stream_index < config_resources.size(); config_stream_index++) {
- const auto &config_buffer = config_resources[config_stream_index];
- auto deactivate_action = DeactivateConfigChannelAction::create(config_stream_index, config_buffer.channel_id());
- CHECK_EXPECTED_AS_STATUS(deactivate_action);
- processed_actions.push_back(deactivate_action.release());
- }
-
- actions = processed_actions;
- return HAILO_SUCCESS;
-}
-
-// For any context with edge layers (the preliminary context when in preliminary_run_asap mode or dynamic contexts),
-// we need to add the following:
-// * Activate*Channel actions (activation order is documented in push_edge_layer_activation_actions)
-// * ChangeVdmaToStreamMapping for each boundary stream in the network group (even for boundaries not activated in the
-// current context).
-// * DdrPairInfoActions for each ddr, followed by StartDdrBufferingTaskAction.
-// * TriggerNewDataFromDataInput for each input layer (inter context/ boundary) in the context. This action is given
-// from the HEF.
-// * Finally StartBurstCreditsTaskAction
-static hailo_status handle_edge_layer_activation_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
- const NetworkGroupMetadata &network_group_metadata,
- const ResourcesManager &resources_manager, ContextResources &context_resources, uint8_t context_index,
- bool is_preliminary_context, bool is_first_operation, bool is_single_context)
-{
- if (is_preliminary_context && !resources_manager.get_supported_features().preliminary_run_asap) {
- // Nothing to do - no edge layers in the preliminary context if not running in preliminary_run_asap mode.
- return HAILO_SUCCESS;
- }
- if (!is_preliminary_context && !is_first_operation) {
- // Nothing to do - edge layers in dynamic contexts only appear in the first operation.
- return HAILO_SUCCESS;
- }
-
- const auto repeated_indexes = get_repreated_actions_boundary_indices(configuration_actions);
- const auto trigger_new_data_from_input_group_indexes = get_indexes_of_action_type(
- configuration_actions, repeated_indexes, ContextSwitchConfigAction::Type::TriggerNewDataFromDataInput);
- CHECK(trigger_new_data_from_input_group_indexes.size() == 1, HAILO_INTERNAL_FAILURE,
- "Expected only one group of TriggerNewDataFromDataInput actions");
- const auto trigger_new_data_from_input_group_start = trigger_new_data_from_input_group_indexes.cbegin()->first;
- const auto trigger_new_data_from_input_group_end = trigger_new_data_from_input_group_indexes.cbegin()->second;
-
- std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
- for (uint32_t action_index = 0; action_index < configuration_actions.size(); action_index++) {
- const auto &configuration_action = configuration_actions[action_index];
- if (ContextSwitchConfigAction::Type::TriggerNewDataFromDataInput == configuration_action->get_type()) {
- auto status = proccess_trigger_new_data_input_action(configuration_action,
- trigger_new_data_from_input_group_start, trigger_new_data_from_input_group_end, action_index,
- network_group_metadata, resources_manager, context_resources, context_index, processed_configuration_actions, is_single_context);
- CHECK_SUCCESS(status);
- } else {
- // Add the current action
- processed_configuration_actions.emplace_back(configuration_action);
- }
- }
-
- // Replace the original configuration actions with the processed ones.
- configuration_actions = processed_configuration_actions;
-
- return HAILO_SUCCESS;
-}
-
-// If groups of consecutive actions can be "merged" as repeated actions (saving room the FW's
-// action list) a RepeatedAction is placed before the relevant actions.
-// See also: CONTEXT_SWITCH_DEFS__repeated_action_header_t's documenting in context_switch_defs.h.
-static hailo_status handle_repeated_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions)
-{
- const auto repeated_indexes = get_repreated_actions_boundary_indices(configuration_actions);
- const auto start_indexes_of_repeated_actions = get_start_indexes_of_repeated_actions(
- configuration_actions, repeated_indexes);
-
- std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
- processed_configuration_actions.reserve(configuration_actions.size() + start_indexes_of_repeated_actions.size());
-
- uint32_t action_index = 0;
- while (action_index < configuration_actions.size()){
- if (contains(start_indexes_of_repeated_actions, action_index)) {
- // A group of actions can be "merged" as repeated actions.
- // Add a RepeatedAction
- const auto num_repeated = start_indexes_of_repeated_actions.at(action_index);
-
- std::vector<ContextSwitchConfigActionPtr> repeated_block;
- repeated_block.reserve(num_repeated);
- for (uint32_t repeated_offset = 0; repeated_offset < num_repeated; repeated_offset++) {
- repeated_block.emplace_back(configuration_actions[action_index]);
- action_index++;
- }
-
- auto repeated_header_action = RepeatedAction::create(std::move(repeated_block));
- CHECK_EXPECTED_AS_STATUS(repeated_header_action);
- processed_configuration_actions.emplace_back(repeated_header_action.value());
- }
- else {
- processed_configuration_actions.emplace_back(configuration_actions[action_index]);
- action_index++;
- }
- }
-
- // Replace the original configuration actions with the processed ones.
- configuration_actions = processed_configuration_actions;
-
- return HAILO_SUCCESS;
-}
-
-static bool is_mercury_device_type(const ProtoHEFHwArch &hw_arch)
-{
- /* TODO - HRT-5067 - use one hw_arch for mercury */
- return (PROTO__HW_ARCH__MERCURY == hw_arch) || (PROTO__HW_ARCH__GINGER == hw_arch) ||
- (PROTO__HW_ARCH__LAVENDER == hw_arch);
-}
-
-static Expected<std::vector<ContextSwitchConfigActionPtr>> process_operation(const ContextSwitchOperation &operation,
- const NetworkGroupMetadata &network_group_metadata,
- const ProtoHEFHwArch &hw_arch, uint8_t context_index, bool is_preliminary_context,
- bool is_first_operation, bool is_single_context, std::vector<ConfigBuffer> &config_resources,
- ResourcesManager &resources_manager,
- ContextResources &context_resources)
-{
- auto configuration_actions = operation.actions();
-
- // Next, we process the actions from the HEF. The resulting vector contains the configuration actions to be
- // executed in chronological order.
- const auto support_pre_fetch = is_mercury_device_type(hw_arch);
- auto status = add_fetch_config_actions(configuration_actions, config_resources, support_pre_fetch);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = handle_edge_layer_activation_actions(configuration_actions, network_group_metadata, resources_manager,
- context_resources, context_index, is_preliminary_context, is_first_operation, is_single_context);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = handle_repeated_actions(configuration_actions);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return configuration_actions;
-}
-
-static hailo_status write_action_list(const ContextResources & context_resources, ContextSwitchBufferBuilder &builder,
- const std::vector<ContextSwitchConfigActionPtr> &actions)
-{
- for (const auto &action : actions) {
- auto action_buffers = action->serialize(context_resources);
- CHECK_EXPECTED_AS_STATUS(action_buffers);
-
- for (auto &action_buffer : action_buffers.value()) {
- builder.write_action(MemoryView(action_buffer));
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status add_edge_layer_end_of_context_actions(const ContextResources &context_resources,
- std::vector<ContextSwitchConfigActionPtr> &actions)
-{
-
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- const bool is_inter_context = false;
- // Validate boundary channels of current context.
- auto validate_action = ValidateChannelAction::create(edge_layer.channel_id, edge_layer.layer_info.direction,
- is_inter_context, static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
- edge_layer.layer_info.max_shmifo_size);
- CHECK_EXPECTED_AS_STATUS(validate_action);
- actions.push_back(validate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_inter_context_layers()) {
- const bool is_inter_context = true;
- auto deactivate_action = DeactivateChannelAction::create(edge_layer.channel_id, edge_layer.layer_info.direction,
- is_inter_context, static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
- edge_layer.layer_info.max_shmifo_size);
- CHECK_EXPECTED_AS_STATUS(deactivate_action);
- actions.push_back(deactivate_action.release());
- }
-
- for (const auto &edge_layer : context_resources.get_ddr_channel_layers()) {
- const bool is_inter_context = false;
- auto deactivate_action = DeactivateChannelAction::create(edge_layer.channel_id, edge_layer.layer_info.direction,
- is_inter_context, static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
- edge_layer.layer_info.max_shmifo_size);
- CHECK_EXPECTED_AS_STATUS(deactivate_action);
- actions.push_back(deactivate_action.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_context_recipes_for_multi_context(const ProtoHEFHwArch &hw_arch,
- ContextResources &context_resources, ResourcesManager &resources_manager,
- uint8_t context_index, const NetworkGroupMetadata &network_group_metadata, const ContextMetadata &context_metadata,
- bool is_single_context)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- // Add edge layers mapping
- status = parse_and_fill_edge_layers_mapping(context_resources, context_metadata, resources_manager);
- CHECK_SUCCESS(status);
-
- // Parse context
- bool first_operation = true;
- std::vector<ContextSwitchConfigActionPtr> actions;
- for (const auto &operation : context_metadata.get_operations()) {
- static const auto NOT_PRELIMINARY_CONTEXT = false;
- auto new_actions = process_operation(operation, network_group_metadata, hw_arch, context_index, NOT_PRELIMINARY_CONTEXT,
- first_operation, is_single_context, context_resources.get_config_buffers(), resources_manager,
- context_resources);
- CHECK_EXPECTED_AS_STATUS(new_actions);
-
- actions.insert(actions.end(), new_actions->begin(), new_actions->end());
- first_operation = false;
- }
-
- status = add_config_channel_activation_actions(actions, context_resources.get_config_buffers());
- CHECK_SUCCESS(status);
-
- if (is_single_context) {
- // Single context network must wait for network group change event after they finish the dynamic context.
- auto wait_action = WaitForNetworkGroupChangeAction::create();
- CHECK_EXPECTED_AS_STATUS(wait_action);
- actions.emplace_back(wait_action.release());
- }
- else {
- status = add_edge_layer_end_of_context_actions(context_resources, actions);
- }
-
- return write_action_list(context_resources, context_resources.builder(), actions);
-}
-
-static hailo_status create_boundary_channels(ResourcesManager &resources_manager,
- NetworkGroupMetadata &network_group_metadata)
-{
- for (const auto &layer_info : network_group_metadata.get_all_layer_infos()) {
- auto status = resources_manager.create_boundary_vdma_channel(layer_info);
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
-}
-
-static hailo_status fill_activation_config_recepies_for_multi_context(
- ContextResources &context_resources, ResourcesManager &resources_manager,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata)
-{
- auto hw_consts = Control::get_hw_consts(resources_manager.get_device());
- CHECK_EXPECTED_AS_STATUS(hw_consts);
- const bool should_optimize_credits = hw_consts->should_optimize_credits &&
- (HAILO_POWER_MODE_PERFORMANCE == resources_manager.get_power_mode());
-
- for (const auto &layer_info : network_group_metadata->get_output_layer_infos()){
- auto status = fill_boundary_output_layer(context_resources, resources_manager, layer_info, *hw_consts,
- should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- for (const auto &layer_info : network_group_metadata->get_input_layer_infos()) {
- auto status = fill_boundary_input_layer(context_resources, resources_manager, layer_info, *hw_consts,
- should_optimize_credits);
- CHECK_SUCCESS(status);
- }
-
- auto status = context_resources.validate_edge_layers();
- CHECK_SUCCESS(status);
-
- std::vector<ContextSwitchConfigActionPtr> actions;
- for (const auto &edge_layer : context_resources.get_boundary_layers()) {
- auto action = edge_layer.layer_info.direction == HAILO_H2D_STREAM ?
- OpenBoundaryInputChannelAction::create(edge_layer.channel_id, edge_layer.buffer_info) :
- OpenBoundaryOutputChannelAction::create(edge_layer.channel_id, edge_layer.buffer_info);
- CHECK_EXPECTED_AS_STATUS(action);
- actions.emplace_back(action.release());
- }
-
- return write_action_list(context_resources, context_resources.builder(), actions);
-}
-
-static hailo_status fill_batch_switching_context_config_recepies_for_multi_context(
- ContextResources &context_resources, const NetworkGroupMetadata &network_group_metadata)
-{
- std::vector<ContextSwitchConfigActionPtr> actions;
-
- // We need to reset the ddr buffering task when we change the batch_size (since it depends on the batch_size param)
- auto reset_ddr_action = ResetDdrBufferingTaskAction::create();
- CHECK_EXPECTED_AS_STATUS(reset_ddr_action);
- actions.emplace_back(reset_ddr_action.release());
-
- // We need to re-enable all the lcus of the first context since some of their config regs are batch dependent.
- // => We'll filter out all of the "enable lcu" actions from the preliminary context
- static const std::set<ContextSwitchConfigAction::Type> BATCH_SWITCHING_ACTIONS = {
- ContextSwitchConfigAction::Type::EnableLcuDefault,
- ContextSwitchConfigAction::Type::EnableLcuNonDefault
- };
- for (const auto &operation : network_group_metadata.preliminary_context().get_operations()) {
- auto operation_actions = operation.get_actions_of_type(BATCH_SWITCHING_ACTIONS);
-
- // Allowing repeated actions
- auto status = handle_repeated_actions(operation_actions);
- CHECK_SUCCESS(status);
-
- actions.insert(actions.end(), operation_actions.begin(), operation_actions.end());
- }
-
- return write_action_list(context_resources, context_resources.builder(), actions);
-}
-
-static hailo_status fill_preliminary_config_recepies_for_multi_context(const ProtoHEFHwArch &hw_arch,
- ContextResources &context_resources, ResourcesManager &resources_manager,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata, const PreliminaryContextMetadata &preliminary_context,
- bool is_single_context)
-{
-
- if (resources_manager.get_supported_features().preliminary_run_asap) {
- // Add edge layers mapping (only preliminary_run_asap networks have edge layers in the preliminary context)
- static const auto PRELIMINARY_CONTEXT_INDEX = 0;
- assert(PRELIMINARY_CONTEXT_INDEX < network_group_metadata->dynamic_contexts().size());
- auto status = parse_and_fill_edge_layers_mapping(context_resources,
- network_group_metadata->dynamic_contexts()[PRELIMINARY_CONTEXT_INDEX], resources_manager);
- CHECK_SUCCESS(status);
- }
-
- // Parse preliminary config
- std::vector<ContextSwitchConfigActionPtr> actions;
- bool first_operation = true;
- for (const auto &operation : preliminary_context.get_operations()) {
- static const auto PRELIMINARY_CONTEXT_INDEX = 0; // First context in the hef
- static const auto PRELIMINARY_CONTEXT = true;
- auto new_actions = process_operation(operation, *network_group_metadata, hw_arch, PRELIMINARY_CONTEXT_INDEX,
- PRELIMINARY_CONTEXT, first_operation, is_single_context, context_resources.get_config_buffers(), resources_manager,
- context_resources);
- CHECK_EXPECTED_AS_STATUS(new_actions);
-
- actions.insert(actions.end(), new_actions->begin(), new_actions->end());
- first_operation = false;
- }
-
- auto status = add_config_channel_activation_actions(actions, context_resources.get_config_buffers());
- CHECK_SUCCESS(status);
-
- return write_action_list(context_resources, context_resources.builder(), actions);
-}
-
-
-
-Expected<std::shared_ptr<ResourcesManager>> ResourcesManagerBuilder::build(uint8_t net_group_index, VdmaDevice &device,
- HailoRTDriver &driver, const ConfigureNetworkParams &config_params,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata, const ProtoHEFHwArch &hw_arch)
-{
- const auto num_contexts = network_group_metadata->dynamic_contexts().size() +
- CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS;
- CHECK_AS_EXPECTED(CONTROL_PROTOCOL__MAX_CONTEXTS_PER_NETWORK_GROUP >= num_contexts, HAILO_INVALID_HEF,
- "App '{}' contains more contexts than allowed ({} > {})",
- network_group_metadata->network_group_name(), num_contexts, CONTROL_PROTOCOL__MAX_CONTEXTS_PER_NETWORK_GROUP);
-
- for (auto &network_params : config_params.network_params_by_name) {
- CHECK(HAILO_MAX_BATCH_SIZE >= network_params.second.batch_size, make_unexpected(HAILO_INVALID_ARGUMENT),
- "Given batch size ({}) for network group {}, network {} is bigger than max allowed ({})", network_params.second.batch_size,
- network_group_metadata->network_group_name(), network_params.first, HAILO_MAX_BATCH_SIZE);
- }
-
- auto resources_manager = ResourcesManager::create(device, driver, config_params, network_group_metadata,
- net_group_index);
- CHECK_EXPECTED(resources_manager);
-
- auto status = create_boundary_channels(resources_manager.value(), *network_group_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto activation_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION);
- CHECK_EXPECTED(activation_context);
- status = fill_activation_config_recepies_for_multi_context(activation_context.value().get(),
- resources_manager.value(), network_group_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto batch_switching_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING);
- CHECK_EXPECTED(batch_switching_context);
- status = fill_batch_switching_context_config_recepies_for_multi_context(batch_switching_context.value().get(),
- *network_group_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- const bool is_single_context = network_group_metadata->dynamic_contexts().size() == 1;
-
- auto preliminary_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY,
- network_group_metadata->preliminary_context().config_buffers_info());
- CHECK_EXPECTED(preliminary_context);
- status = fill_preliminary_config_recepies_for_multi_context(hw_arch, preliminary_context.value().get(),
- resources_manager.value(), network_group_metadata, network_group_metadata->preliminary_context(), is_single_context);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- uint8_t context_index = 0;
- for (const auto &context_metadata : network_group_metadata->dynamic_contexts()) {
- auto new_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC,
- context_metadata.config_buffers_info());
- CHECK_EXPECTED(new_context);
-
- status = fill_context_recipes_for_multi_context(hw_arch, new_context.value().get(), resources_manager.value(),
- context_index, *network_group_metadata,
- context_metadata, is_single_context);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- context_index++;
- }
-
- status = resources_manager->create_internal_vdma_channels();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = resources_manager->configure();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto resources_manager_ptr = make_shared_nothrow<ResourcesManager>(resources_manager.release());
- CHECK_NOT_NULL_AS_EXPECTED(resources_manager_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return resources_manager_ptr;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file resource_manager_builder.hpp
- * @brief Builds a ResourcesManager object for the given CoreOp.
- **/
-
-#ifndef _HAILO_RESOURCE_MANAGER_BUILDER_HPP_
-#define _HAILO_RESOURCE_MANAGER_BUILDER_HPP_
-
-#include "hef_internal.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-
-
-namespace hailort
-{
-
-class ResourcesManagerBuilder final {
-public:
- ResourcesManagerBuilder() = delete;
-
- /* TODO HRT-5067 - work with hailo_device_architecture_t instead of ProtoHEFHwArch */
- static Expected<std::shared_ptr<ResourcesManager>> build(uint8_t net_group_index, VdmaDevice &device,
- HailoRTDriver &driver, const ConfigureNetworkParams &config_params,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata, const ProtoHEFHwArch &hw_arch);
-
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_RESOURCE_MANAGER_BUILDER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hcp_config_activated_network_group.hpp
- * @brief Represent activated network_group from HEF.
- *
- * This network_group can be used for control-cofigured network_groups only (for etherent or pcie)
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_
-#define _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_
-
-#include "hailo/device.hpp"
-#include "common/utils.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/active_network_group_holder.hpp"
-
-#include <vector>
-#include <map>
-
-namespace hailort
-{
-
-struct WriteMemoryInfo
-{
- uint32_t address;
- Buffer data;
-};
-
-class HcpConfigActivatedNetworkGroup : public ActivatedNetworkGroupBase
-{
- public:
- static Expected<HcpConfigActivatedNetworkGroup> create(Device &device, std::vector<WriteMemoryInfo> &config,
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- ActiveNetGroupHolder &active_net_group_holder,
- hailo_power_mode_t power_mode, EventPtr network_group_activated_event,
- ConfiguredNetworkGroupBase &network_group);
-
- virtual ~HcpConfigActivatedNetworkGroup();
- HcpConfigActivatedNetworkGroup(const HcpConfigActivatedNetworkGroup &) = delete;
- HcpConfigActivatedNetworkGroup &operator=(const HcpConfigActivatedNetworkGroup &) = delete;
- HcpConfigActivatedNetworkGroup &operator=(HcpConfigActivatedNetworkGroup &&) = delete;
- HcpConfigActivatedNetworkGroup(HcpConfigActivatedNetworkGroup &&other) noexcept :
- ActivatedNetworkGroupBase(std::move(other)), m_active_net_group_holder(other.m_active_net_group_holder),
- m_is_active(std::exchange(other.m_is_active, false)), m_power_mode(other.m_power_mode),
- m_device(other.m_device), m_network_group_name(std::move(other.m_network_group_name)) {};
-
- virtual const std::string &get_network_group_name() const override;
-
- virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &/*key*/) override
- {
- LOGGER__ERROR("get_intermediate_buffer() is not supported on single_context network_groups");
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- virtual hailo_status set_keep_nn_config_during_reset(const bool /* keep_nn_config_during_reset */) override
- {
- LOGGER__ERROR("set_keep_nn_config_during_reset() is not supported on single_context network_groups");
- return HAILO_INVALID_OPERATION;
- }
-
- private:
- HcpConfigActivatedNetworkGroup(Device &device, ActiveNetGroupHolder &active_net_group_holder,
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- hailo_power_mode_t power_mode, EventPtr &&network_group_activated_event,
- ConfiguredNetworkGroupBase &network_group, hailo_status &status);
-
- ActiveNetGroupHolder &m_active_net_group_holder;
- bool m_is_active;
- hailo_power_mode_t m_power_mode;
- Device &m_device;
- std::string m_network_group_name;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_NETWORK_GROUP_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hcp_config_network_group.hpp
- * @brief Represent network_group from HEF file that can be activated
- *
- * This network_group can be used for control-configured network_groups (for etherent or pcie)
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_HCP_CONFIG_NETWORK_GROUP_HPP_
-#define _HAILO_CONTEXT_SWITCH_HCP_CONFIG_NETWORK_GROUP_HPP_
-
-#include "hailo/device.hpp"
-#include "common/utils.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/active_network_group_holder.hpp"
-#include "hailort_defaults.hpp"
-#include "context_switch/single_context/hcp_config_activated_network_group.hpp"
-
-#include <vector>
-#include <map>
-
-namespace hailort
-{
-
-class HcpConfigNetworkGroup : public ConfiguredNetworkGroupBase
-{
-public:
- HcpConfigNetworkGroup(
- Device &device, ActiveNetGroupHolder &active_net_group_holder, std::vector<WriteMemoryInfo> &&config,
- const ConfigureNetworkParams &config_params, NetworkGroupMetadata &&network_group_metadata, hailo_status &status,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops);
-
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size) override;
- virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
-
- virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
- virtual Expected<std::shared_ptr<VdmaChannel>> get_boundary_vdma_channel_by_stream_name(
- const std::string &stream_name) override;
- virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
- virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
-
- virtual hailo_status activate_impl(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_impl() override;
-
- virtual ~HcpConfigNetworkGroup() = default;
- HcpConfigNetworkGroup(const HcpConfigNetworkGroup &other) = delete;
- HcpConfigNetworkGroup &operator=(const HcpConfigNetworkGroup &other) = delete;
- HcpConfigNetworkGroup &operator=(HcpConfigNetworkGroup &&other) = delete;
- HcpConfigNetworkGroup(HcpConfigNetworkGroup &&other) noexcept : ConfiguredNetworkGroupBase(std::move(other)),
- m_config(std::move(other.m_config)), m_active_net_group_holder(other.m_active_net_group_holder),
- m_device(other.m_device) {}
-
-private:
- std::vector<WriteMemoryInfo> m_config;
- ActiveNetGroupHolder &m_active_net_group_holder;
- Device &m_device;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_HCP_CONFIG_NETWORK_GROUP_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_network_group.cpp
- * @brief: Network Group Wrapper
- **/
-
-#include "vdevice_network_group.hpp"
-#include "vdevice_stream.hpp"
-#include "vdevice_stream_multiplexer_wrapper.hpp"
-#include "vstream_internal.hpp"
-#include "tracer_macros.hpp"
-
-namespace hailort
-{
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> VDeviceActivatedNetworkGroup::create(
- std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> configured_network_groups,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- const hailo_activate_network_group_params_t &network_group_params,
- EventPtr network_group_activated_event, uint16_t dynamic_batch_size,
- AccumulatorPtr deactivation_time_accumulator)
-{
- auto status = HAILO_UNINITIALIZED;
- std::vector<std::unique_ptr<ActivatedNetworkGroup>> activated_network_groups;
- activated_network_groups.reserve(configured_network_groups.size());
- for (auto cng : configured_network_groups) {
- auto ang = cng->create_activated_network_group(network_group_params, dynamic_batch_size);
- CHECK_EXPECTED(ang);
- activated_network_groups.emplace_back(ang.release());
- }
- auto ang = VDeviceActivatedNetworkGroup(std::move(activated_network_groups), input_streams, output_streams,
- network_group_params, network_group_activated_event, deactivation_time_accumulator, status);
-
- CHECK_SUCCESS_AS_EXPECTED(status);
- std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
- make_unique_nothrow<VDeviceActivatedNetworkGroup>(std::move(ang));
- CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- status = network_group_activated_event->signal();
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed to signal network activation event");
-
- return activated_net_group_ptr;
-}
-
-const std::string &VDeviceActivatedNetworkGroup::get_network_group_name() const
-{
- // network_group_name is same across all NGs
- return m_activated_network_groups[0]->get_network_group_name();
-}
-
-Expected<Buffer> VDeviceActivatedNetworkGroup::get_intermediate_buffer(const IntermediateBufferKey &key)
-{
- CHECK_AS_EXPECTED(1 == m_activated_network_groups.size(), HAILO_INVALID_OPERATION, "getting intermediate buffer is supported only over single device");
- return m_activated_network_groups[0]->get_intermediate_buffer(key);
-}
-
-hailo_status VDeviceActivatedNetworkGroup::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
-{
- for (auto &activated_network_group : m_activated_network_groups) {
- auto status = activated_network_group->set_keep_nn_config_during_reset(keep_nn_config_during_reset);
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
-}
-
-VDeviceActivatedNetworkGroup::VDeviceActivatedNetworkGroup(std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams, std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- const hailo_activate_network_group_params_t &network_group_params, EventPtr network_group_activated_event, AccumulatorPtr deactivation_time_accumulator, hailo_status &status)
- : ActivatedNetworkGroupBase(network_group_params, input_streams, output_streams, std::move(network_group_activated_event), status),
- m_activated_network_groups(std::move(activated_network_groups)), m_should_reset_network_group(true), m_deactivation_time_accumulator(deactivation_time_accumulator)
-{
-}
-
-VDeviceActivatedNetworkGroup::VDeviceActivatedNetworkGroup(VDeviceActivatedNetworkGroup &&other) noexcept :
- ActivatedNetworkGroupBase(std::move(other)),
- m_activated_network_groups(std::move(other.m_activated_network_groups)),
- m_should_reset_network_group(std::exchange(other.m_should_reset_network_group, false)),
- m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator))
-{
-}
-
-
-Expected<std::shared_ptr<VDeviceNetworkGroup>> VDeviceNetworkGroup::create(std::vector<std::shared_ptr<ConfiguredNetworkGroup>> configured_network_group,
- NetworkGroupSchedulerWeakPtr network_group_scheduler)
-{
- auto status = HAILO_UNINITIALIZED;
- std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> vdma_config_ngs;
- vdma_config_ngs.reserve(configured_network_group.size());
- for (auto &network_group : configured_network_group) {
- auto vdma_ng = std::dynamic_pointer_cast<VdmaConfigNetworkGroup>(network_group);
- assert(nullptr != vdma_ng);
- vdma_config_ngs.push_back(vdma_ng);
- }
-
- auto net_flow_ops_copy = vdma_config_ngs[0]->m_net_flow_ops;
-
- VDeviceNetworkGroup object(std::move(vdma_config_ngs), network_group_scheduler, std::move(net_flow_ops_copy), status);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto obj_ptr = make_shared_nothrow<VDeviceNetworkGroup>(std::move(object));
- CHECK_NOT_NULL_AS_EXPECTED(obj_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return obj_ptr;
-}
-
-Expected<std::shared_ptr<VDeviceNetworkGroup>> VDeviceNetworkGroup::duplicate(std::shared_ptr<VDeviceNetworkGroup> other)
-{
- auto status = HAILO_UNINITIALIZED;
- auto net_flow_ops_copy = other->m_net_flow_ops;
-
- VDeviceNetworkGroup object(other->m_configured_network_groups, other->m_network_group_scheduler, std::move(net_flow_ops_copy), status);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto obj_ptr = make_shared_nothrow<VDeviceNetworkGroup>(std::move(object));
- CHECK_NOT_NULL_AS_EXPECTED(obj_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return obj_ptr;
-}
-
-
-VDeviceNetworkGroup::VDeviceNetworkGroup(std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> configured_network_group,
- NetworkGroupSchedulerWeakPtr network_group_scheduler, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops, hailo_status &status) :
- ConfiguredNetworkGroupBase(configured_network_group[0]->m_config_params,
- configured_network_group[0]->m_network_group_metadata, std::move(net_flow_ops), status),
- m_configured_network_groups(std::move(configured_network_group)),
- m_network_group_scheduler(network_group_scheduler),
- m_scheduler_handle(INVALID_NETWORK_GROUP_HANDLE),
- m_multiplexer_handle(0),
- m_multiplexer()
-{}
-
-Expected<hailo_stream_interface_t> VDeviceNetworkGroup::get_default_streams_interface()
-{
- auto first_streams_interface = m_configured_network_groups[0]->get_default_streams_interface();
- CHECK_EXPECTED(first_streams_interface);
-#ifndef NDEBUG
- // Check that all physical devices has the same interface
- for (auto &network_group : m_configured_network_groups) {
- auto iface = network_group->get_default_streams_interface();
- CHECK_EXPECTED(iface);
- CHECK_AS_EXPECTED(iface.value() == first_streams_interface.value(), HAILO_INTERNAL_FAILURE,
- "Not all default stream interfaces are the same");
- }
-#endif
- return first_streams_interface;
-}
-
-hailo_status VDeviceNetworkGroup::create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t scheduler_handle)
-{
- // TODO - HRT-6931 - raise error on this case
- if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_configured_network_groups.size())) {
- LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
- }
-
- m_multiplexer = multiplexer;
-
- for (const auto &stream_parameters_pair : m_config_params.stream_params_by_name) {
- switch (stream_parameters_pair.second.direction) {
- case HAILO_H2D_STREAM:
- {
- auto status = create_input_vdevice_stream_from_config_params(stream_parameters_pair.second,
- stream_parameters_pair.first, multiplexer, scheduler_handle);
- CHECK_SUCCESS(status);
- }
- break;
- case HAILO_D2H_STREAM:
- {
- auto status = create_output_vdevice_stream_from_config_params(stream_parameters_pair.second,
- stream_parameters_pair.first, multiplexer, scheduler_handle);
- CHECK_SUCCESS(status);
- }
- break;
- default:
- LOGGER__ERROR("stream name {} direction is invalid.", stream_parameters_pair.first);
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- for (const auto &input_stream : m_input_streams) {
- auto expected_queue_size = static_cast<InputStreamBase&>(*input_stream.second).get_buffer_frames_size();
- CHECK_EXPECTED_AS_STATUS(expected_queue_size);
- TRACE(CreateNetworkGroupInputStreamsTrace, "", name(), input_stream.first, (uint32_t)expected_queue_size.value());
- }
- for (const auto &output_stream : m_output_streams) {
- if (static_cast<OutputStreamBase&>(*output_stream.second).get_layer_info().format.order == hailo_format_order_t::HAILO_FORMAT_ORDER_HAILO_NMS) {
- continue;
- }
- auto expected_queue_size = static_cast<OutputStreamBase&>(*output_stream.second).get_buffer_frames_size();
- CHECK_EXPECTED_AS_STATUS(expected_queue_size);
- TRACE(CreateNetworkGroupOutputStreamsTrace, "", name(), output_stream.first, (uint32_t)expected_queue_size.value());
- }
-
- auto status = m_multiplexer->add_network_group_instance(m_multiplexer_handle, *this);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceNetworkGroup::create_input_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
- const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t scheduler_handle)
-{
- auto edge_layer = get_layer_info(stream_name);
- CHECK_EXPECTED_AS_STATUS(edge_layer);
-
- CHECK(HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface), HAILO_INVALID_OPERATION,
- "Stream {} not supported on VDevice usage. {} has {} interface.", stream_name, stream_params.stream_interface);
-
- std::vector<std::reference_wrapper<VdmaInputStream>> low_level_streams;
- low_level_streams.reserve(m_configured_network_groups.size());
- for (auto &net_group : m_configured_network_groups) {
- auto stream = net_group->get_input_stream_by_name(stream_name);
- CHECK(stream, HAILO_INTERNAL_FAILURE);
- low_level_streams.emplace_back(dynamic_cast<VdmaInputStream&>(stream.release().get()));
- }
- auto input_stream = InputVDeviceBaseStream::create(std::move(low_level_streams), edge_layer.value(),
- scheduler_handle, m_network_group_activated_event, m_network_group_scheduler);
- CHECK_EXPECTED_AS_STATUS(input_stream);
- auto input_stream_wrapper = VDeviceInputStreamMultiplexerWrapper::create(input_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
- CHECK_EXPECTED_AS_STATUS(input_stream_wrapper);
- m_input_streams.insert(make_pair(stream_name, input_stream_wrapper.release()));
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceNetworkGroup::create_output_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
- const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t scheduler_handle)
-{
- auto edge_layer = get_layer_info(stream_name);
- CHECK_EXPECTED_AS_STATUS(edge_layer);
-
- CHECK(HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface), HAILO_INVALID_OPERATION,
- "Stream {} not supported on VDevice usage. {} has {} interface.", stream_name, stream_params.stream_interface);
-
- std::vector<std::reference_wrapper<VdmaOutputStream>> low_level_streams;
- low_level_streams.reserve(m_configured_network_groups.size());
- for (auto &net_group : m_configured_network_groups) {
- auto stream = net_group->get_output_stream_by_name(stream_name);
- CHECK(stream, HAILO_INTERNAL_FAILURE);
- low_level_streams.emplace_back(dynamic_cast<VdmaOutputStream&>(stream.release().get()));
- }
- auto output_stream = OutputVDeviceBaseStream::create(std::move(low_level_streams), edge_layer.value(),
- scheduler_handle, m_network_group_activated_event, m_network_group_scheduler);
- CHECK_EXPECTED_AS_STATUS(output_stream);
- auto output_stream_wrapper = VDeviceOutputStreamMultiplexerWrapper::create(output_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
- CHECK_EXPECTED_AS_STATUS(output_stream_wrapper);
- m_output_streams.insert(make_pair(stream_name, output_stream_wrapper.release()));
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceNetworkGroup::create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceNetworkGroup> other)
-{
- // TODO - HRT-6931 - raise error on this case
- if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_configured_network_groups.size())) {
- LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
- }
-
- m_multiplexer = other->m_multiplexer;
- m_multiplexer_handle = other->multiplexer_duplicates_count() + 1;
-
- for (auto &name_stream_pair : other->m_input_streams) {
- auto input_stream = static_cast<VDeviceInputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
- auto copy = input_stream->clone(m_multiplexer_handle);
- CHECK_EXPECTED_AS_STATUS(copy);
-
- m_input_streams.insert(make_pair(name_stream_pair.first, copy.release()));
- }
-
- for (auto &name_stream_pair : other->m_output_streams) {
- auto output_stream = static_cast<VDeviceOutputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
- auto copy = output_stream->clone(m_multiplexer_handle);
- CHECK_EXPECTED_AS_STATUS(copy);
-
- m_output_streams.insert(make_pair(name_stream_pair.first, copy.release()));
- }
-
- auto status = other->m_multiplexer->add_network_group_instance(m_multiplexer_handle, *this);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-void VDeviceNetworkGroup::set_network_group_handle(scheduler_ng_handle_t handle)
-{
- m_scheduler_handle = handle;
-}
-
-scheduler_ng_handle_t VDeviceNetworkGroup::network_group_handle() const
-{
- return m_scheduler_handle;
-}
-
-hailo_status VDeviceNetworkGroup::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
-{
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INVALID_OPERATION,
- "Cannot set scheduler timeout for network group {}, as it is configured on a vdevice which does not have scheduling enabled", name());
- if (network_name != HailoRTDefaults::get_network_name(name())) {
- CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler timeout for a specific network is currently not supported");
- }
- auto status = network_group_scheduler->set_timeout(m_scheduler_handle, timeout, network_name);
- CHECK_SUCCESS(status);
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceNetworkGroup::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
-{
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INVALID_OPERATION,
- "Cannot set scheduler threshold for network group {}, as it is configured on a vdevice which does not have scheduling enabled", name());
- if (network_name != HailoRTDefaults::get_network_name(name())) {
- CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler threshold for a specific network is currently not supported");
- }
- auto status = network_group_scheduler->set_threshold(m_scheduler_handle, threshold, network_name);
- CHECK_SUCCESS(status);
- return HAILO_SUCCESS;
-}
-
-Expected<std::shared_ptr<LatencyMetersMap>> VDeviceNetworkGroup::get_latency_meters()
-{
- return m_configured_network_groups[0]->get_latency_meters();
-}
-
-Expected<std::shared_ptr<VdmaChannel>> VDeviceNetworkGroup::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
-{
- if (1 < m_configured_network_groups.size()) {
- LOGGER__ERROR("get_boundary_vdma_channel_by_stream_name function is not supported on more than 1 physical device.");
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- return m_configured_network_groups[0]->get_boundary_vdma_channel_by_stream_name(stream_name);
-}
-
-Expected<std::vector<OutputVStream>> VDeviceNetworkGroup::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
-{
- auto expected = ConfiguredNetworkGroupBase::create_output_vstreams(outputs_params);
- CHECK_EXPECTED(expected);
-
- if (nullptr == m_multiplexer) {
- return expected.release();
- }
-
- m_multiplexer->set_output_vstreams_names(m_multiplexer_handle, expected.value());
-
- for (auto &vstream : expected.value()) {
- static_cast<OutputVStreamImpl&>(*vstream.m_vstream).set_on_vstream_cant_read_callback([this, name = vstream.name()] () {
- m_multiplexer->set_can_output_vstream_read(m_multiplexer_handle, name, false);
- });
- static_cast<OutputVStreamImpl&>(*vstream.m_vstream).set_on_vstream_can_read_callback([this, name = vstream.name()] () {
- m_multiplexer->set_can_output_vstream_read(m_multiplexer_handle, name, true);
- });
- }
-
- return expected.release();
-}
-
-Expected<std::shared_ptr<VdmaConfigNetworkGroup>> VDeviceNetworkGroup::get_network_group_by_device_index(uint32_t device_index)
-{
- CHECK_AS_EXPECTED(device_index <m_configured_network_groups.size(), HAILO_INVALID_ARGUMENT);
- auto vdma_config_network_group = m_configured_network_groups[device_index];
- return vdma_config_network_group;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_network_group.hpp
- * @brief Class declaration for VDeviceNetworkGroup, a wrapper around ConfiguredNetworkGroupBase which is used
- * to support multiple ConfiguredNetworkGroup objects that encapsulate the same actual configured network group.
- **/
-
-#ifndef _HAILO_VDEVICE_NETWORK_GROUP_WRAPPER_HPP_
-#define _HAILO_VDEVICE_NETWORK_GROUP_WRAPPER_HPP_
-
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-#include "hailo/network_group.hpp"
-#include "hailo/vstream.hpp"
-
-#include "context_switch/multi_context/vdma_config_network_group.hpp"
-#include "network_group_internal.hpp"
-#include "network_group_scheduler.hpp"
-#include "pipeline_multiplexer.hpp"
-
-#include <cstdint>
-
-namespace hailort
-{
-
-class VDeviceActivatedNetworkGroup : public ActivatedNetworkGroupBase
-{
-public:
- static Expected<std::unique_ptr<ActivatedNetworkGroup>> create(std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> configured_network_groups,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- const hailo_activate_network_group_params_t &network_group_params, EventPtr network_group_activated_event,
- uint16_t dynamic_batch_size, AccumulatorPtr deactivation_time_accumulator);
-
- virtual ~VDeviceActivatedNetworkGroup()
- {
- if (!m_should_reset_network_group) {
- return;
- }
- const auto start_time = std::chrono::steady_clock::now();
-
- m_network_group_activated_event->reset();
- m_activated_network_groups.clear();
-
- const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
- std::chrono::steady_clock::now() - start_time).count();
- LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
- m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
- }
-
- VDeviceActivatedNetworkGroup(const VDeviceActivatedNetworkGroup &other) = delete;
- VDeviceActivatedNetworkGroup &operator=(const VDeviceActivatedNetworkGroup &other) = delete;
- VDeviceActivatedNetworkGroup &operator=(VDeviceActivatedNetworkGroup &&other) = delete;
- VDeviceActivatedNetworkGroup(VDeviceActivatedNetworkGroup &&other) noexcept;
-
- virtual const std::string &get_network_group_name() const override;
- virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
- virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
-
-private:
- VDeviceActivatedNetworkGroup(
- std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- const hailo_activate_network_group_params_t &network_group_params, EventPtr network_group_activated_event,
- AccumulatorPtr deactivation_time_accumulator, hailo_status &status);
-
- std::vector<std::unique_ptr<ActivatedNetworkGroup>> m_activated_network_groups;
- bool m_should_reset_network_group;
- AccumulatorPtr m_deactivation_time_accumulator;
-};
-
-class VDeviceNetworkGroup : public ConfiguredNetworkGroupBase
-{
-public:
- // TODO (HRT-8751): remove duplicate members from this class or from vdma_config_network _group
- static Expected<std::shared_ptr<VDeviceNetworkGroup>> create(std::vector<std::shared_ptr<ConfiguredNetworkGroup>> configured_network_group,
- NetworkGroupSchedulerWeakPtr network_group_scheduler);
-
- static Expected<std::shared_ptr<VDeviceNetworkGroup>> duplicate(std::shared_ptr<VDeviceNetworkGroup> other);
-
- virtual ~VDeviceNetworkGroup() = default;
- VDeviceNetworkGroup(const VDeviceNetworkGroup &other) = delete;
- VDeviceNetworkGroup &operator=(const VDeviceNetworkGroup &other) = delete;
- VDeviceNetworkGroup &operator=(VDeviceNetworkGroup &&other) = delete;
- VDeviceNetworkGroup(VDeviceNetworkGroup &&other) = default;
-
- // Function from vdma network group
- hailo_status create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer,
- scheduler_ng_handle_t scheduler_handle);
- hailo_status create_input_vdevice_stream_from_config_params(
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
- std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t scheduler_handle);
- hailo_status create_output_vdevice_stream_from_config_params(
- const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
- std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t scheduler_handle);
-
- hailo_status create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceNetworkGroup> other);
-
- bool equals(const Hef &hef, const std::string &network_group_name)
- {
- return m_configured_network_groups[0]->equals(hef, network_group_name);
- }
-
- uint32_t multiplexer_duplicates_count()
- {
- assert(m_multiplexer->instances_count() > 0);
- return static_cast<uint32_t>(m_multiplexer->instances_count() - 1);
- }
-
- virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
-
- virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
- virtual Expected<std::shared_ptr<VdmaChannel>> get_boundary_vdma_channel_by_stream_name(
- const std::string &stream_name) override;
-
- void set_network_group_handle(scheduler_ng_handle_t handle);
- scheduler_ng_handle_t network_group_handle() const;
- virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
- virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
-
- virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
-
- virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override
- {
- CHECK(!m_network_group_scheduler.lock(), HAILO_INVALID_OPERATION,
- "Waiting for network group activation is not allowed when the network group scheduler is active!");
-
- return m_network_group_activated_event->wait(timeout);
- }
-
- virtual hailo_status activate_impl(uint16_t /*dynamic_batch_size*/) override
- {
- return HAILO_INTERNAL_FAILURE;
- }
-
- virtual hailo_status deactivate_impl() override
- {
- return HAILO_INTERNAL_FAILURE;
- }
-
- virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size) override
- {
- auto start_time = std::chrono::steady_clock::now();
-
- CHECK_AS_EXPECTED(!m_network_group_scheduler.lock(), HAILO_INVALID_OPERATION,
- "Manually activating a network group is not allowed when the network group scheduler is active!");
-
- auto res = VDeviceActivatedNetworkGroup::create(m_configured_network_groups, m_input_streams, m_output_streams,
- network_group_params, m_network_group_activated_event, dynamic_batch_size, m_deactivation_time_accumulator);
- const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
- std::chrono::steady_clock::now() - start_time).count();
- CHECK_EXPECTED(res);
-
- LOGGER__INFO("Activating {} on VDevice took {} milliseconds. Note that the function is asynchronous and"
- " thus the network is not fully activated yet.", name(), elapsed_time_ms);
- m_activation_time_accumulator->add_data_point(elapsed_time_ms);
-
- return res;
- }
-
- Expected<std::shared_ptr<VdmaConfigNetworkGroup>> get_network_group_by_device_index(uint32_t device_index);
-
-private:
- VDeviceNetworkGroup(std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> configured_network_group,
- NetworkGroupSchedulerWeakPtr network_group_scheduler, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops,
- hailo_status &status);
-
- std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> m_configured_network_groups;
- NetworkGroupSchedulerWeakPtr m_network_group_scheduler;
- scheduler_ng_handle_t m_scheduler_handle;
- multiplexer_ng_handle_t m_multiplexer_handle;
- std::shared_ptr<PipelineMultiplexer> m_multiplexer;
-};
-
-}
-
-#endif /* _HAILO_VDEVICE_NETWORK_GROUP_WRAPPER_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file multi_context_activatedN_network_group.cpp
- * @brief VdmaConfigActivatedNetworkGroup implementation
- **/
-
-#include "context_switch/multi_context/vdma_config_activated_network_group.hpp"
-#include "control.hpp"
-#include <chrono>
-
-namespace hailort
-{
-
-Expected<VdmaConfigActivatedNetworkGroup> VdmaConfigActivatedNetworkGroup::create(
- ActiveNetGroupHolder &active_net_group_holder,
- const std::string &network_group_name,
- std::shared_ptr<ResourcesManager> resources_manager,
- const hailo_activate_network_group_params_t &network_group_params,
- uint16_t dynamic_batch_size,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- EventPtr network_group_activated_event,
- AccumulatorPtr deactivation_time_accumulator,
- ConfiguredNetworkGroupBase &network_group)
-{
- CHECK(!active_net_group_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
- "network group is currently active. You must deactivate before activating another network_group");
-
- CHECK_ARG_NOT_NULL_AS_EXPECTED(deactivation_time_accumulator);
-
- auto status = HAILO_UNINITIALIZED;
- VdmaConfigActivatedNetworkGroup object(network_group_name, network_group_params, dynamic_batch_size, input_streams, output_streams,
- std::move(resources_manager), active_net_group_holder, std::move(network_group_activated_event),
- deactivation_time_accumulator, network_group, status);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return object;
-}
-
-VdmaConfigActivatedNetworkGroup::VdmaConfigActivatedNetworkGroup(
- const std::string &network_group_name,
- const hailo_activate_network_group_params_t &network_group_params,
- uint16_t dynamic_batch_size,
- std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
- std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
- std::shared_ptr<ResourcesManager> &&resources_manager,
- ActiveNetGroupHolder &active_net_group_holder,
- EventPtr &&network_group_activated_event,
- AccumulatorPtr deactivation_time_accumulator,
- ConfiguredNetworkGroupBase &network_group,
- hailo_status &status) :
- ActivatedNetworkGroupBase(network_group_params, input_streams, output_streams,
- std::move(network_group_activated_event), status),
- m_network_group_name(network_group_name),
- m_should_reset_network_group(true),
- m_active_net_group_holder(active_net_group_holder),
- m_resources_manager(std::move(resources_manager)),
- m_deactivation_time_accumulator(deactivation_time_accumulator),
- m_keep_nn_config_during_reset(false)
-{
- // Validate ActivatedNetworkGroup status
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- // We know network_group is a VdmaConfigNetworkGroup
- status = network_group.activate_impl(dynamic_batch_size);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Error activating network group");
- return;
- }
-}
-
-VdmaConfigActivatedNetworkGroup::VdmaConfigActivatedNetworkGroup(VdmaConfigActivatedNetworkGroup &&other) noexcept :
- ActivatedNetworkGroupBase(std::move(other)),
- m_network_group_name(std::move(other.m_network_group_name)),
- m_should_reset_network_group(std::exchange(other.m_should_reset_network_group, false)),
- m_active_net_group_holder(other.m_active_net_group_holder),
- m_resources_manager(std::move(other.m_resources_manager)),
- m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator)),
- m_keep_nn_config_during_reset(std::move(other.m_keep_nn_config_during_reset))
-{}
-
-VdmaConfigActivatedNetworkGroup::~VdmaConfigActivatedNetworkGroup()
-{
- if (!m_should_reset_network_group) {
- return;
- }
-
- auto status = HAILO_UNINITIALIZED;
- const auto start_time = std::chrono::steady_clock::now();
-
- auto config_network_group_ref = m_active_net_group_holder.get();
- if (!config_network_group_ref.has_value()) {
- LOGGER__ERROR("Error getting configured network group");
- return;
- }
-
- auto vdma_config_network_group = config_network_group_ref.value();
-
- status = vdma_config_network_group.get().deactivate_impl();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed deactivating network group");
- }
-
- status = m_resources_manager->reset_state_machine(m_keep_nn_config_during_reset);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to reset context switch with status {}", status);
- }
-
- const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
- std::chrono::steady_clock::now() - start_time).count();
- LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
- m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
-}
-
-const std::string &VdmaConfigActivatedNetworkGroup::get_network_group_name() const
-{
- return m_network_group_name;
-}
-
-Expected<Buffer> VdmaConfigActivatedNetworkGroup::get_intermediate_buffer(const IntermediateBufferKey &key)
-{
- return m_resources_manager->read_intermediate_buffer(key);
-}
-
-hailo_status VdmaConfigActivatedNetworkGroup::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
-{
- m_keep_nn_config_during_reset = keep_nn_config_during_reset;
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-#include "tracer_macros.hpp"
-#include "context_switch/multi_context/vdma_config_network_group.hpp"
-#include "network_group_internal.hpp"
-#include "eth_stream.hpp"
-#include "pcie_stream.hpp"
-#include "mipi_stream.hpp"
-#include "vstream_internal.hpp"
-
-namespace hailort
-{
-
-Expected<VdmaConfigNetworkGroup> VdmaConfigNetworkGroup::create(ActiveNetGroupHolder &active_net_group_holder,
- const ConfigureNetworkParams &config_params,
- std::shared_ptr<ResourcesManager> resources_manager, const std::string &hef_hash,
- std::shared_ptr<NetworkGroupMetadata> network_group_metadata,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops)
-{
- auto status = HAILO_UNINITIALIZED;
-
- VdmaConfigNetworkGroup object(active_net_group_holder, config_params,
- std::move(resources_manager), hef_hash, *network_group_metadata, status, std::move(net_flow_ops));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return object;
-}
-
-VdmaConfigNetworkGroup::VdmaConfigNetworkGroup(ActiveNetGroupHolder &active_net_group_holder,
- const ConfigureNetworkParams &config_params,
- std::shared_ptr<ResourcesManager> &&resources_manager, const std::string &hef_hash,
- const NetworkGroupMetadata &network_group_metadata, hailo_status &status,
- std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops) :
- ConfiguredNetworkGroupBase(config_params,
- network_group_metadata, std::move(net_flow_ops), status),
- m_active_net_group_holder(active_net_group_holder),
- m_resources_manager(std::move(resources_manager)),
- m_hef_hash(hef_hash)
-{}
-
-hailo_status VdmaConfigNetworkGroup::activate_impl(uint16_t dynamic_batch_size)
-{
- auto status = HAILO_UNINITIALIZED;
-
- // Check that no network is currently activated
- CHECK(!m_active_net_group_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
- "Cant activate network because a network is already activated");
-
- m_active_net_group_holder.set(*this);
-
- status = m_resources_manager->register_fw_managed_vdma_channels();
- CHECK_SUCCESS(status, "Failed to start fw managed vdma channels.");
-
- status = m_resources_manager->set_inter_context_channels_dynamic_batch_size(dynamic_batch_size);
- CHECK_SUCCESS(status, "Failed to set inter-context channels dynamic batch size.");
-
- status = m_resources_manager->enable_state_machine(dynamic_batch_size);
- CHECK_SUCCESS(status, "Failed to activate state-machine");
-
- status = activate_low_level_streams(dynamic_batch_size);
- CHECK_SUCCESS(status, "Failed to activate low level streams");
-
- status = m_network_group_activated_event->signal();
- CHECK_SUCCESS(status, "Failed to signal network activation event");
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaConfigNetworkGroup::deactivate_impl()
-{
- auto status = HAILO_UNINITIALIZED;
-
- // Check that network is currently activated
- CHECK(m_active_net_group_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
- "Cant Deactivate network because no network is already activated");
-
- // Make sure the network group we are deactivating is this object
- auto config_network_group_ref = m_active_net_group_holder.get().value();
- CHECK(this == std::addressof(config_network_group_ref.get()), HAILO_INTERNAL_FAILURE,
- "Trying to deactivate different network goup");
-
- m_active_net_group_holder.clear();
-
- m_network_group_activated_event->reset();
-
- status = deactivate_low_level_streams();
- CHECK_SUCCESS(status, "Failed to deactivate low level streams");
-
- status = m_resources_manager->unregister_fw_managed_vdma_channels();
- CHECK_SUCCESS(status, "Failed to stop fw managed vdma channels");
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> VdmaConfigNetworkGroup::create_activated_network_group(
- const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size)
-{
- auto start_time = std::chrono::steady_clock::now();
- auto activated_net_group = VdmaConfigActivatedNetworkGroup::create(
- m_active_net_group_holder, name(), m_resources_manager, network_group_params, dynamic_batch_size,
- m_input_streams, m_output_streams, m_network_group_activated_event, m_deactivation_time_accumulator, (*this));
- const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
- std::chrono::steady_clock::now() - start_time).count();
- CHECK_EXPECTED(activated_net_group);
-
- LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and"
- " thus the network is not fully activated yet.", name(), elapsed_time_ms);
- m_activation_time_accumulator->add_data_point(elapsed_time_ms);
-
- std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
- make_unique_nothrow<VdmaConfigActivatedNetworkGroup>(activated_net_group.release());
- CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return activated_net_group_ptr;
-}
-
-Expected<hailo_stream_interface_t> VdmaConfigNetworkGroup::get_default_streams_interface()
-{
- return m_resources_manager->get_default_streams_interface();
-}
-
-hailo_status VdmaConfigNetworkGroup::set_scheduler_timeout(const std::chrono::milliseconds &/*timeout*/, const std::string &/*network_name*/)
-{
- LOGGER__ERROR("Setting scheduler's timeout is only allowed when working with VDevice and scheduler enabled");
- return HAILO_INVALID_OPERATION;
-}
-
-hailo_status VdmaConfigNetworkGroup::set_scheduler_threshold(uint32_t /*threshold*/, const std::string &/*network_name*/)
-{
- LOGGER__ERROR("Setting scheduler's threshold is only allowed when working with VDevice and scheduler enabled");
- return HAILO_INVALID_OPERATION;
-}
-
-Expected<std::shared_ptr<LatencyMetersMap>> VdmaConfigNetworkGroup::get_latency_meters()
-{
- auto latency_meters = m_resources_manager->get_latency_meters();
- return make_shared_nothrow<LatencyMetersMap>(latency_meters);
-}
-
-Expected<std::shared_ptr<VdmaChannel>> VdmaConfigNetworkGroup::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
-{
- return m_resources_manager->get_boundary_vdma_channel_by_stream_name(stream_name);
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file control.cpp
- * @brief Implements module which allows controling Hailo chip.
- **/
-
-#include "common/utils.hpp"
-#include "control.hpp"
-#include "common/logger_macros.hpp"
-#include "control_protocol.h"
-#include "byte_order.h"
-#include "firmware_status.h"
-#include "firmware_header_utils.h"
-#include "d2h_events.h"
-#include "hw_consts.hpp"
-#include <array>
-#include "hef_internal.hpp"
-
-namespace hailort
-{
-
-#ifndef MIN
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-#endif
-
-#define POWER_MEASUREMENT_DELAY_MS(__sample_period, __average_factor) \
- (static_cast<uint32_t>((__sample_period) / 1000.0 * (__average_factor) * 2 * 1.2))
-
-#define OVERCURRENT_PROTECTION_WARNING ( \
- "Using the overcurrent protection dvm for power measurement will disable the overcurrent protection.\n" \
- "If only taking one measurement, the protection will resume automatically.\n" \
- "If doing continuous measurement, to enable overcurrent protection again you have to stop the power measurement on this dvm." \
- )
-
-typedef std::array<std::array<float64_t, CONTROL_PROTOCOL__POWER_MEASUREMENT_TYPES__COUNT>, CONTROL_PROTOCOL__DVM_OPTIONS_COUNT> power_conversion_multiplier_t;
-
-
-Expected<hailo_device_identity_t> control__parse_identify_results(CONTROL_PROTOCOL_identify_response_t *identify_response)
-{
- hailo_device_identity_t board_info;
-
- CHECK_AS_EXPECTED(nullptr != identify_response, HAILO_INVALID_ARGUMENT);
-
- // Store identify response inside control
- board_info.protocol_version = BYTE_ORDER__ntohl(identify_response->protocol_version);
- board_info.logger_version = BYTE_ORDER__ntohl(identify_response->logger_version);
- (void)memcpy(&(board_info.fw_version),
- &(identify_response->fw_version),
- sizeof(board_info.fw_version));
- board_info.board_name_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->board_name_length);
- (void)memcpy(&(board_info.board_name),
- &(identify_response->board_name),
- BYTE_ORDER__ntohl(identify_response->board_name_length));
- board_info.serial_number_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->serial_number_length);
- (void)memcpy(&(board_info.serial_number),
- &(identify_response->serial_number),
- BYTE_ORDER__ntohl(identify_response->serial_number_length));
- board_info.part_number_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->part_number_length);
- (void)memcpy(&(board_info.part_number),
- &(identify_response->part_number),
- BYTE_ORDER__ntohl(identify_response->part_number_length));
- board_info.product_name_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->product_name_length);
- (void)memcpy(&(board_info.product_name),
- &(identify_response->product_name),
- BYTE_ORDER__ntohl(identify_response->product_name_length));
-
- // Check if the firmware is debug or release
- board_info.is_release = (!IS_REVISION_DEV(board_info.fw_version.revision));
-
- // Check if the firmware was compiled with EXTENDED_CONTEXT_SWITCH_BUFFER
- board_info.extended_context_switch_buffer = IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(board_info.fw_version.revision);
-
- // Make sure response was from app CPU
- CHECK_AS_EXPECTED((0 == (board_info.fw_version.revision & REVISION_APP_CORE_FLAG_BIT_MASK)), HAILO_INVALID_FIRMWARE,
- "Got invalid app FW type, which means the FW was not marked correctly. unmaked FW revision {}", board_info.fw_version.revision);
-
- // Keep the revision number only
- board_info.fw_version.revision = GET_REVISION_NUMBER_VALUE(board_info.fw_version.revision);
-
- board_info.device_architecture = static_cast<hailo_device_architecture_t>(BYTE_ORDER__ntohl(identify_response->device_architecture));
-
- /* Write identify results to log */
- LOGGER__INFO("firmware_version is: {}.{}.{}",
- board_info.fw_version.major,
- board_info.fw_version.minor,
- board_info.fw_version.revision
- );
- LOGGER__DEBUG("Protocol version: {}", board_info.protocol_version);
- LOGGER__DEBUG("Logger version: {}", board_info.logger_version);
- LOGGER__DEBUG("Device architecture code: {}", board_info.device_architecture);
-
- return board_info;
-}
-
-Expected<hailo_extended_device_information_t> control__parse_get_extended_device_information_results
- (CONTROL_PROTOCOL__get_extended_device_information_response_t &get_extended_device_information_response)
-{
- uint8_t local_supported_features;
- hailo_extended_device_information_t device_info;
-
- local_supported_features = (uint8_t)BYTE_ORDER__ntohl(get_extended_device_information_response.supported_features);
-
- device_info.supported_features.ethernet = (local_supported_features &
- (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_ETHERNET_BIT_OFFSET)) != 0;
- device_info.supported_features.pcie = (local_supported_features &
- (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_PCIE_BIT_OFFSET)) != 0;
- device_info.supported_features.mipi = (local_supported_features &
- (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_MIPI_BIT_OFFSET)) != 0;
- device_info.supported_features.current_monitoring = (local_supported_features &
- (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_CURRENT_MONITORING_BIT_OFFSET)) != 0;
- device_info.supported_features.mdio = (local_supported_features &
- (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_MDIO_BIT_OFFSET)) != 0;
- device_info.neural_network_core_clock_rate = BYTE_ORDER__ntohl(get_extended_device_information_response.neural_network_core_clock_rate);
-
- LOGGER__DEBUG("Max Neural Network Core Clock Rate: {}", device_info.neural_network_core_clock_rate);
-
- device_info.boot_source = static_cast<hailo_device_boot_source_t>(
- BYTE_ORDER__ntohl(get_extended_device_information_response.boot_source));
-
- (void)memcpy(device_info.soc_id,
- get_extended_device_information_response.soc_id,
- BYTE_ORDER__ntohl(get_extended_device_information_response.soc_id_length));
-
- device_info.lcs = get_extended_device_information_response.lcs;
-
- memcpy(&device_info.unit_level_tracking_id[0], &get_extended_device_information_response.fuse_info, sizeof(device_info.unit_level_tracking_id));
- memcpy(&device_info.eth_mac_address[0], &get_extended_device_information_response.eth_mac_address[0], BYTE_ORDER__ntohl(get_extended_device_information_response.eth_mac_length));
- memcpy(&device_info.soc_pm_values, &get_extended_device_information_response.pd_info, sizeof(device_info.soc_pm_values));
-
- return device_info;
-}
-
-Expected<hailo_health_info_t> control__parse_get_health_information_results
- (CONTROL_PROTOCOL__get_health_information_response_t *get_health_information_response)
-{
- hailo_health_info_t health_info;
-
- CHECK_AS_EXPECTED(nullptr != get_health_information_response, HAILO_INVALID_ARGUMENT);
-
- health_info.overcurrent_protection_active = get_health_information_response->overcurrent_protection_active;
- health_info.current_overcurrent_zone = get_health_information_response->current_overcurrent_zone;
- // Re-convertion to floats after
- health_info.red_overcurrent_threshold = float32_t(BYTE_ORDER__ntohl(get_health_information_response->red_overcurrent_threshold));
- health_info.overcurrent_throttling_active = get_health_information_response->overcurrent_throttling_active;
- health_info.temperature_throttling_active = get_health_information_response->temperature_throttling_active;
- health_info.current_temperature_zone = get_health_information_response->current_temperature_zone;
- health_info.current_temperature_throttling_level = get_health_information_response->current_temperature_throttling_level;
- memcpy(&health_info.temperature_throttling_levels[0], &get_health_information_response->temperature_throttling_levels[0],
- BYTE_ORDER__ntohl(get_health_information_response->temperature_throttling_levels_length));
- health_info.orange_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->orange_temperature_threshold);
- health_info.orange_hysteresis_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->orange_hysteresis_temperature_threshold);
- health_info.red_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->red_temperature_threshold);
- health_info.red_hysteresis_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->red_hysteresis_temperature_threshold);
- health_info.requested_overcurrent_clock_freq = BYTE_ORDER__ntohl(get_health_information_response->requested_overcurrent_clock_freq);
- health_info.requested_temperature_clock_freq = BYTE_ORDER__ntohl(get_health_information_response->requested_temperature_clock_freq);
- return health_info;
-}
-
-
-hailo_status control__parse_core_identify_results(CONTROL_PROTOCOL__core_identify_response_t *identify_response,
- hailo_core_information_t *core_info)
-{
- CHECK_ARG_NOT_NULL(core_info);
- CHECK_ARG_NOT_NULL(identify_response);
-
- // Store identify response inside control
- (void)memcpy(&(core_info->fw_version),
- &(identify_response->fw_version),
- sizeof(core_info->fw_version));
-
- // Check if firmware is at debug/release
- core_info->is_release = !(IS_REVISION_DEV(core_info->fw_version.revision));
-
- // Check if the firmware was compiled with EXTENDED_CONTEXT_SWITCH_BUFFER
- core_info->extended_context_switch_buffer = IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(core_info->fw_version.revision);
-
- // Make sure response was from core CPU
- CHECK((REVISION_APP_CORE_FLAG_BIT_MASK == (core_info->fw_version.revision & REVISION_APP_CORE_FLAG_BIT_MASK)), HAILO_INVALID_FIRMWARE,
- "Got invalid core FW type, which means the FW was not marked correctly. unmaked FW revision {}", core_info->fw_version.revision);
-
- // Keep the revision number only
- core_info->fw_version.revision = GET_REVISION_NUMBER_VALUE(core_info->fw_version.revision);
-
- // Write identify results to log
- LOGGER__INFO("core firmware_version is: {}.{}.{}",
- core_info->fw_version.major,
- core_info->fw_version.minor,
- core_info->fw_version.revision
- );
-
- return HAILO_SUCCESS;
-}
-
-
-hailo_status Control::parse_and_validate_response(uint8_t *message, uint32_t message_size,
- CONTROL_PROTOCOL__response_header_t **header, CONTROL_PROTOCOL__payload_t **payload,
- CONTROL_PROTOCOL__request_t *request)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__status_t fw_status = {};
- const char *firmware_status_text = NULL;
-
- /* Parse the response */
- common_status = CONTROL_PROTOCOL__parse_response(message, message_size, header, payload, &fw_status);
- if (HAILO_STATUS__CONTROL_PROTOCOL__INVALID_VERSION == common_status) {
- status = HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION;
- }
- else {
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- }
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- /* Valdiate response was succesfull - both major and minor should be error free */
- if (0 != fw_status.major_status) {
- status = HAILO_FW_CONTROL_FAILURE;
- LOGGER__ERROR("Firmware control has failed. Major status: {:#x}, Minor status: {:#x}",
- fw_status.major_status,
- fw_status.minor_status);
- common_status = FIRMWARE_STATUS__get_textual((FIRMWARE_STATUS_t)fw_status.major_status, &firmware_status_text);
- if (HAILO_COMMON_STATUS__SUCCESS == common_status) {
- LOGGER__ERROR("Firmware major status: {}", firmware_status_text);
- } else {
- LOGGER__ERROR("Cannot find textual address for firmware status {:#x}, common_status = {}",
- (FIRMWARE_STATUS_t)fw_status.major_status, common_status);
- }
- common_status = FIRMWARE_STATUS__get_textual((FIRMWARE_STATUS_t)fw_status.minor_status, &firmware_status_text);
- if (HAILO_COMMON_STATUS__SUCCESS == common_status) {
- LOGGER__ERROR("Firmware minor status: {}", firmware_status_text);
- } else {
- LOGGER__ERROR("Cannot find textual address for firmware status {:#x}, common_status = {}",
- (FIRMWARE_STATUS_t)fw_status.minor_status, common_status);
- }
-
- if ((HAILO_CONTROL_STATUS_UNSUPPORTED_OPCODE == fw_status.minor_status) ||
- (HAILO_CONTROL_STATUS_UNSUPPORTED_OPCODE == fw_status.major_status)) {
- status = HAILO_UNSUPPORTED_OPCODE;
- LOGGER__ERROR("Opcode {} is not supported",
- CONTROL_PROTOCOL__get_textual_opcode((CONTROL_PROTOCOL__OPCODE_t)BYTE_ORDER__ntohl(request->header.common_header.opcode)));
- }
- goto exit;
- }
-
- /* Validate response opcode is same as request */
- if (request->header.common_header.opcode != (*header)->common_header.opcode) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Invalid opcode received from FW");
- goto exit;
- }
-
- /* Validate response version is same as request */
- if (request->header.common_header.version != (*header)->common_header.version) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Invalid protocol version received from FW");
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-Expected<hailo_device_identity_t> Control::identify(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL_identify_response_t *identify_response = NULL;
-
- /* Validate arguments */
- common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
- identify_response = (CONTROL_PROTOCOL_identify_response_t *)(payload->parameters);
-
- return control__parse_identify_results(identify_response);
-}
-
-hailo_status Control::core_identify(Device &device, hailo_core_information_t *core_info)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__core_identify_response_t *identify_response = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(core_info);
-
- common_status = CONTROL_PROTOCOL__pack_core_identify_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- identify_response = (CONTROL_PROTOCOL__core_identify_response_t *)(payload->parameters);
-
- /* Store results inside contol object */
- status = control__parse_core_identify_results(identify_response, core_info);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
-hailo_status Control::set_fw_logger(Device &device, hailo_fw_logger_level_t level, uint32_t interface_mask)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
-
- auto common_status = CONTROL_PROTOCOL__pack_set_fw_logger_request(&request, &request_size, device.get_control_sequence(), level,
- static_cast<uint8_t>(interface_mask));
-
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::set_clock_freq(Device &device, uint32_t clock_freq)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
-
- auto common_status = CONTROL_PROTOCOL__pack_set_clock_freq_request(&request, &request_size, device.get_control_sequence(), clock_freq);
-
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::set_throttling_state(Device &device, bool should_activate)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
-
- auto common_status = CONTROL_PROTOCOL__pack_set_throttling_state_request(&request, &request_size, device.get_control_sequence(), should_activate);
-
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<bool> Control::get_throttling_state(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_throttling_state_response_t *get_throttling_state_response = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_get_throttling_state_request(&request, &request_size, device.get_control_sequence());
-
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- get_throttling_state_response = (CONTROL_PROTOCOL__get_throttling_state_response_t *)(payload->parameters);
- return std::move(get_throttling_state_response->is_active);
-}
-
-hailo_status Control::set_overcurrent_state(Device &device, bool should_activate)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
-
- auto common_status = CONTROL_PROTOCOL__pack_set_overcurrent_state_request(&request, &request_size, device.get_control_sequence(), should_activate);
-
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<bool> Control::get_overcurrent_state(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_overcurrent_state_response_t *get_overcurrent_state_response = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_get_overcurrent_state_request(&request, &request_size, device.get_control_sequence());
-
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- get_overcurrent_state_response = (CONTROL_PROTOCOL__get_overcurrent_state_response_t *)(payload->parameters);
- return std::move(get_overcurrent_state_response->is_required);
-}
-
-Expected<CONTROL_PROTOCOL__hw_consts_t> Control::get_hw_consts(Device &device)
-{
- size_t request_size = 0;
- CONTROL_PROTOCOL__request_t request = {};
- auto common_status = CONTROL_PROTOCOL__pack_get_hw_consts_request(&request, &request_size, device.get_control_sequence());
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- const auto &response = *reinterpret_cast<CONTROL_PROTOCOL__get_hw_consts_response_t*>(payload->parameters);
- return Expected<CONTROL_PROTOCOL__hw_consts_t>(response.hw_consts);
-}
-
-hailo_status Control::write_memory_chunk(Device &device, uint32_t address, const uint8_t *data, uint32_t chunk_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
-
- /* Validate arguments */
- ASSERT(NULL != data);
-
- /* Validate chunk size is valid */
- ASSERT(CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE >= chunk_size);
- ASSERT(0 != chunk_size);
-
- common_status = CONTROL_PROTOCOL__pack_write_memory_request(&request, &request_size, device.get_control_sequence(), address, data, chunk_size);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::write_memory(Device &device, uint32_t address, const uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- uint32_t current_write_address = address;
- const uint8_t* current_data_address = data;
- uint32_t chunk_size = CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE;
- uint32_t number_of_chunks = data_length / chunk_size;
- uint32_t data_chunk_leftover = data_length % chunk_size;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- if (data_length >= chunk_size) {
- for (size_t i = 0; i < number_of_chunks; i++ ) {
- /* Write current memory chunk */
- status = write_memory_chunk(device, current_write_address, current_data_address, chunk_size);
- CHECK_SUCCESS(status);
-
- current_write_address += chunk_size;
- current_data_address += chunk_size;
- }
- }
-
- if (data_chunk_leftover > 0) {
- /* Write leftover */
- status = write_memory_chunk(device, current_write_address, current_data_address, data_chunk_leftover);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::read_memory_chunk(Device &device, uint32_t address, uint8_t *data, uint32_t chunk_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- uint32_t actual_read_data_length = 0;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__read_memory_response_t *read_memory_response = NULL;
-
-
- /* Validate arguments */
- ASSERT(NULL != data);
-
-
- /* Validate chunk size is valid */
- ASSERT(CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE >= chunk_size);
- ASSERT(0 != chunk_size);
-
- common_status = CONTROL_PROTOCOL__pack_read_memory_request(&request, &request_size, device.get_control_sequence(), address, chunk_size);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- read_memory_response = (CONTROL_PROTOCOL__read_memory_response_t *)(payload->parameters);
- actual_read_data_length = BYTE_ORDER__ntohl(read_memory_response->data_length);
- if (chunk_size != actual_read_data_length) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Did not read all data from control response");
- goto exit;
- }
- (void)memcpy(data, &read_memory_response->data[0], actual_read_data_length);
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::read_memory(Device &device, uint32_t address, uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- uint32_t current_read_address = address;
- uint8_t* current_data_address = data;
- uint32_t chunk_size = CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE;
- uint32_t number_of_chunks = data_length / chunk_size;
- uint32_t data_chunk_leftover = data_length % chunk_size;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- if (data_length >= chunk_size) {
- for (size_t i = 0; i < number_of_chunks; i++ ) {
- /* Read current memory chunk */
- status = read_memory_chunk(device, current_read_address, current_data_address, chunk_size);
- CHECK_SUCCESS(status);
-
- current_read_address += chunk_size;
- current_data_address += chunk_size;
- }
- }
-
- if (data_chunk_leftover > 0) {
- /* Read leftover */
- status = read_memory_chunk(device, current_read_address, current_data_address, data_chunk_leftover);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::open_stream(Device &device, uint8_t dataflow_manager_id, bool is_input)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_open_stream_request(&request, &request_size, device.get_control_sequence(),
- dataflow_manager_id, is_input);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::close_stream(Device &device, uint8_t dataflow_manager_id, bool is_input)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_close_stream_request(&request, &request_size, device.get_control_sequence(),
- dataflow_manager_id, is_input);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::close_all_streams(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- /* Close all input streams */
- status = close_stream(device, CONTROL_PROTOCOL__ALL_DATAFLOW_MANAGERS, true);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Close all output streams */
- status = close_stream(device, CONTROL_PROTOCOL__ALL_DATAFLOW_MANAGERS, false);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_udp_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_udp_input_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_udp_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_udp_output_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_mipi_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_mipi_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_pcie_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_stream_pcie_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
- uint32_t dataflow_manager_id_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(&request, &request_size,
- device.get_control_sequence(), params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
- dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
-
- /* Validate read data is data size */
- if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- goto exit;
- }
-
- dataflow_manager_id = response->dataflow_manager_id;
-
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-// TODO: needed?
-hailo_status Control::power_measurement(Device &device, CONTROL_PROTOCOL__dvm_options_t dvm,
- CONTROL_PROTOCOL__power_measurement_types_t measurement_type, float32_t *measurement)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__power_measurement_response_t *response = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(measurement);
-
- common_status = CONTROL_PROTOCOL__pack_power_measurement_request(&request, &request_size, device.get_control_sequence(),
- dvm, measurement_type);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- response = (CONTROL_PROTOCOL__power_measurement_response_t*)(payload->parameters);
-
- LOGGER__INFO("The chosen dvm type is: {}, and measurement type: {}", response->dvm,
- response->measurement_type);
- if (CONTROL_PROTOCOL__DVM_OPTIONS_OVERCURRENT_PROTECTION == response->dvm) {
- LOGGER__WARN(OVERCURRENT_PROTECTION_WARNING);
- }
-
- *measurement = response->power_measurement;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::set_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, CONTROL_PROTOCOL__dvm_options_t dvm,
- CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__set_power_measurement_response_t *response = NULL;
-
- CHECK(CONTROL_PROTOCOL__MAX_NUMBER_OF_POWER_MEASUREMETS > buffer_index,
- HAILO_INVALID_ARGUMENT, "Invalid power measurement index {}", buffer_index);
-
- common_status = CONTROL_PROTOCOL__pack_set_power_measurement_request(&request, &request_size, device.get_control_sequence(),
- buffer_index, dvm, measurement_type);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- response = (CONTROL_PROTOCOL__set_power_measurement_response_t*)(payload->parameters);
-
- LOGGER__INFO("The chosen dvm type is: {}, and measurement type: {}", response->dvm,
- response->measurement_type);
- if (CONTROL_PROTOCOL__DVM_OPTIONS_OVERCURRENT_PROTECTION == response->dvm) {
- LOGGER__WARN(OVERCURRENT_PROTECTION_WARNING);
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::get_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, bool should_clear,
- hailo_power_measurement_data_t *measurement_data)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_power_measurement_response_t *get_power_response = NULL;
-
- /* Validate arguments */
- CHECK(CONTROL_PROTOCOL__MAX_NUMBER_OF_POWER_MEASUREMETS > buffer_index,
- HAILO_INVALID_ARGUMENT, "Invalid power measurement index {}", buffer_index);
- CHECK_ARG_NOT_NULL(measurement_data);
- common_status = CONTROL_PROTOCOL__pack_get_power_measurement_request(&request, &request_size, device.get_control_sequence(),
- buffer_index, should_clear);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
- get_power_response = (CONTROL_PROTOCOL__get_power_measurement_response_t *)(payload->parameters);
-
- /* Copy measurement data from response to the exported measurement data */
- measurement_data->average_time_value_milliseconds = get_power_response->average_time_value_milliseconds;
- measurement_data->average_value = get_power_response->average_value;
- measurement_data->min_value = get_power_response->min_value;
- measurement_data->max_value = get_power_response->max_value;
- measurement_data->total_number_of_samples = BYTE_ORDER__ntohl(get_power_response->total_number_of_samples);
- LOGGER__DEBUG("avg: {:f}, min: {:f}, max: {:f}",
- measurement_data->average_value,
- measurement_data->min_value,
- measurement_data->max_value);
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::start_power_measurement(Device &device,
- CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- uint32_t delay_milliseconds = POWER_MEASUREMENT_DELAY_MS(sampling_period, averaging_factor);
- // There is no logical way that measurement delay can be 0 - because sampling_period and averaging_factor cant be 0
- // Hence if it is 0 - it means it was 0.xx and we want to round up to 1 in that case
- if (0 == delay_milliseconds) {
- delay_milliseconds = 1;
- }
-
- common_status = CONTROL_PROTOCOL__pack_start_power_measurement_request(&request, &request_size, device.get_control_sequence(),
- delay_milliseconds, averaging_factor, sampling_period);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::stop_power_measurement(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_stop_power_measurement_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::i2c_write(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
- const uint8_t *data, uint32_t length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(slave_config);
- CHECK_ARG_NOT_NULL(data);
-
- /* Pack request */
- common_status = CONTROL_PROTOCOL__pack_i2c_write_request(&request, &request_size, device.get_control_sequence(),
- register_address, static_cast<uint8_t>(slave_config->endianness),
- slave_config->slave_address, slave_config->register_address_size, slave_config->bus_index, data, length);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::i2c_read(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
- uint8_t *data, uint32_t length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__i2c_read_response_t *response = NULL;
- uint32_t local_data_length = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(slave_config);
- CHECK_ARG_NOT_NULL(data);
-
- /* Pack request */
- common_status = CONTROL_PROTOCOL__pack_i2c_read_request(&request, &request_size, device.get_control_sequence(),
- register_address, static_cast<uint8_t>(slave_config->endianness),
- slave_config->slave_address, slave_config->register_address_size, slave_config->bus_index, length,
- slave_config->should_hold_bus);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- response = (CONTROL_PROTOCOL__i2c_read_response_t *)(payload->parameters);
- local_data_length = BYTE_ORDER__ntohl(response->data_length);
-
- /* Validate read data is data size */
- if (local_data_length != length) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Read data size from I2C does not match register size. ({} != {})",
- local_data_length, length);
- goto exit;
- }
-
- /* Copy the returned results back to the user */
- (void)memcpy(data, response->data, local_data_length);
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_core_top(Device &device, CONTROL_PROTOCOL__config_core_top_type_t config_type,
- CONTROL_PROTOCOL__config_core_top_params_t *params)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(params);
-
- common_status = CONTROL_PROTOCOL__pack_config_core_top_request(&request, &request_size, device.get_control_sequence(), config_type, params);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::phy_operation(Device &device, CONTROL_PROTOCOL__phy_operation_t operation_type)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_phy_operation_request(&request, &request_size, device.get_control_sequence(), operation_type);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::examine_user_config(Device &device, hailo_fw_user_config_information_t *info)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__examine_user_config_response_t *response = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(info);
-
- common_status = CONTROL_PROTOCOL__pack_examine_user_config(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Save response information into exported struct */
- response = ((CONTROL_PROTOCOL__examine_user_config_response_t *)(payload->parameters));
- info->version = BYTE_ORDER__ntohl(response->version);
- info->entry_count = BYTE_ORDER__ntohl(response->entry_count);
- info->total_size = BYTE_ORDER__ntohl(response->total_size);
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::read_user_config_chunk(Device &device, uint32_t read_offset, uint32_t read_length,
- uint8_t *buffer, uint32_t *actual_read_data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__read_user_config_response_t *response = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_read_user_config(&request, &request_size, device.get_control_sequence(),
- read_offset, read_length);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- response = (CONTROL_PROTOCOL__read_user_config_response_t *)(payload->parameters);
- *actual_read_data_length = BYTE_ORDER__ntohl(response->data_length);
- (void) memcpy(buffer, response->data, *actual_read_data_length);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::read_user_config(Device &device, uint8_t *buffer, uint32_t buffer_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- uint32_t actual_read_data_length = 0;
- uint32_t read_offset = 0;
- hailo_fw_user_config_information_t user_config_info = {};
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(buffer);
-
- status = examine_user_config(device, &user_config_info);
- CHECK_SUCCESS(status);
-
- CHECK(buffer_length >= user_config_info.total_size, HAILO_INSUFFICIENT_BUFFER,
- "read buffer is too small. provided buffer size: {} bytes, user config size: {} bytes", buffer_length,
- user_config_info.total_size);
-
- LOGGER__INFO("Preparing to read user configuration. Version: {}, Entry Count: {}, Total Size (bytes): {}",
- user_config_info.version, user_config_info.entry_count, user_config_info.total_size);
-
- while (read_offset < user_config_info.total_size) {
- read_user_config_chunk(device, read_offset, user_config_info.total_size - read_offset,
- buffer + read_offset, &actual_read_data_length);
- read_offset += actual_read_data_length;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::write_user_config_chunk(Device &device, uint32_t offset, const uint8_t *data, uint32_t chunk_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_write_user_config_request(&request, &request_size,
- device.get_control_sequence(), offset, data + offset, chunk_size);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::write_user_config(Device &device, const uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- uint32_t offset = 0;
- uint32_t chunk_size = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- while (offset < data_length) {
- chunk_size = MIN(WRITE_CHUNK_SIZE, (data_length - offset));
- status = write_user_config_chunk(device, offset, data, chunk_size);
- CHECK_SUCCESS(status);
- offset += chunk_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::erase_user_config(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_erase_user_config_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
-hailo_status Control::read_board_config(Device &device, uint8_t *buffer, uint32_t buffer_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- uint32_t actual_read_data_length = 0;
- uint32_t read_offset = 0;
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__read_user_config_response_t *response = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(buffer);
-
- CHECK(buffer_length >= BOARD_CONFIG_SIZE, HAILO_INSUFFICIENT_BUFFER,
- "read buffer is too small. provided buffer size: {} bytes, board config size: {} bytes", buffer_length,
- BOARD_CONFIG_SIZE);
-
- LOGGER__INFO("Preparing to read board configuration");
- common_status = CONTROL_PROTOCOL__pack_read_board_config(&request, &request_size, device.get_control_sequence(),
- read_offset, BOARD_CONFIG_SIZE);
-
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
- response = (CONTROL_PROTOCOL__read_board_config_response_t *)(payload->parameters);
- actual_read_data_length = BYTE_ORDER__ntohl(response->data_length);
- (void) memcpy(buffer, response->data, actual_read_data_length);
-
- return HAILO_SUCCESS;
-}
-
-
-
-hailo_status Control::write_board_config(Device &device, const uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- uint32_t write_offset = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- CHECK(BOARD_CONFIG_SIZE >= data_length, HAILO_INVALID_OPERATION,
- "Invalid size of board config. data_length={}, max_size={}" , data_length, BOARD_CONFIG_SIZE);
-
- common_status = CONTROL_PROTOCOL__pack_write_board_config_request(&request, &request_size,
- device.get_control_sequence(), write_offset, data + write_offset, data_length);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
- &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::write_second_stage_to_internal_memory(Device &device, uint32_t offset, uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- common_status = CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(&request, &request_size, device.get_control_sequence(), offset,
- data, data_length);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
-hailo_status Control::copy_second_stage_to_flash(Device &device, MD5_SUM_t *expected_md5, uint32_t second_stage_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(expected_md5);
-
- common_status = CONTROL_PROTOCOL__copy_second_stage_to_flash_request(&request, &request_size, device.get_control_sequence(), expected_md5, second_stage_size);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::start_firmware_update(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_start_firmware_update_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::finish_firmware_update(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_finish_firmware_update_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::write_firmware_update(Device &device, uint32_t offset, const uint8_t *data, uint32_t data_length)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(data);
-
- common_status = CONTROL_PROTOCOL__write_firmware_update_request(&request, &request_size, device.get_control_sequence(), offset,
- data, data_length);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::validate_firmware_update(Device &device, MD5_SUM_t *expected_md5, uint32_t firmware_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(expected_md5);
-
- common_status = CONTROL_PROTOCOL__pack_validate_firmware_update_request(&request, &request_size, device.get_control_sequence(),
- expected_md5, firmware_size);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
/* Reads the last inbound-to-outbound latency measurement from the firmware.
 * The value (in nanoseconds) is written through inbound_to_outbound_latency_nsec.
 * Returns HAILO_SUCCESS, or a failure status from packing / transport / response validation. */
hailo_status Control::latency_measurement_read(Device &device, uint32_t *inbound_to_outbound_latency_nsec)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;
    CONTROL_PROTOCOL__latency_read_response_t *response = NULL;

    /* Validate arguments */
    CHECK_ARG_NOT_NULL(inbound_to_outbound_latency_nsec);

    /* Pack the request into the control-protocol wire format */
    common_status = CONTROL_PROTOCOL__pack_latency_measurement_read_request(&request, &request_size, device.get_control_sequence());
    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* The measurement arrives in network byte order; convert to host order */
    response = (CONTROL_PROTOCOL__latency_read_response_t*)(payload->parameters);
    *inbound_to_outbound_latency_nsec = BYTE_ORDER__ntohl(response->inbound_to_outbound_latency_nsec);

    status = HAILO_SUCCESS;
exit:
    return status;
}
-
-hailo_status Control::latency_measurement_config(Device &device, uint8_t latency_measurement_en,
- uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index,
- uint32_t outbound_stream_index)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_latency_measurement_config_request(&request, &request_size, device.get_control_sequence(),
- latency_measurement_en, inbound_start_buffer_number, outbound_stop_buffer_number,
- inbound_stream_index, outbound_stream_index);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
/* Stores a chunk of a sensor configuration into a device config section.
 * is_first       - non-zero on the first chunk of the section.
 * section_index  - which sensor-config section to write to.
 * start_offset   - offset of this chunk within the section.
 * data/data_length - the chunk payload (must not be NULL).
 * config_name/config_name_length - name recorded for the stored configuration (must not be NULL).
 * The remaining parameters (reset size, sensor type, total size, height/width/fps)
 * describe the overall configuration being stored. */
hailo_status Control::sensor_store_config(Device &device, uint32_t is_first, uint32_t section_index,
    uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size, uint8_t *data,
    uint32_t data_length,uint16_t config_height, uint16_t config_width, uint16_t config_fps,
    uint32_t config_name_length, uint8_t *config_name)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;


    /* Validate arguments */
    CHECK_ARG_NOT_NULL(data);
    CHECK_ARG_NOT_NULL(config_name);


    /* Pack the request into the control-protocol wire format */
    common_status = CONTROL_PROTOCOL__pack_sensor_store_config_request(&request, &request_size, device.get_control_sequence(), is_first, section_index, start_offset,
        reset_data_size, sensor_type, total_data_size, data, data_length, config_height,
        config_width, config_fps, config_name_length, config_name);

    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    status = HAILO_SUCCESS;
exit:
    return status;
}
-
/* Sets the I2C bus index used for the given sensor type.
 * NOTE(review): unlike sibling controls, the pack helper's result is assigned
 * straight into hailo_status (no HAILO_COMMON_STATUS_t translation) — presumably
 * this helper already returns hailo_status; verify against its declaration. */
hailo_status Control::sensor_set_i2c_bus_index(Device &device, uint32_t sensor_type, uint32_t bus_index)
{
    hailo_status status = HAILO_UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;

    /* Pack the request; CHECK_SUCCESS logs and returns on failure */
    status = CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(&request, &request_size, device.get_control_sequence(), sensor_type, bus_index);
    CHECK_SUCCESS(status);

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    CHECK_SUCCESS(status);

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
-
-hailo_status Control::sensor_load_and_start_config(Device &device, uint32_t section_index)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(&request, &request_size, device.get_control_sequence(), section_index);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::sensor_reset(Device &device, uint32_t section_index)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_sensor_reset_request(&request, &request_size, device.get_control_sequence(), section_index);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::sensor_set_generic_i2c_slave(Device &device, uint16_t slave_address,
- uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(&request, &request_size, device.get_control_sequence(), slave_address, register_address_size, bus_index, should_hold_bus, endianness);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
/* Reads `data_length` bytes of a stored sensor configuration from the given
 * section/offset into `data`.
 * NOTE(review): `data` is assumed to have room for at least `data_length` bytes —
 * the buffer size itself is not passed in; confirm callers allocate accordingly.
 * Fails with HAILO_INVALID_CONTROL_RESPONSE if the firmware returned fewer/more
 * bytes than requested. */
hailo_status Control::sensor_get_config(Device &device, uint32_t section_index, uint32_t offset, uint32_t data_length,
    uint8_t *data)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    uint32_t actual_read_data_length = 0;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;
    CONTROL_PROTOCOL__sensor_get_config_response_t *sensor_get_config_response = NULL;

    /* Validate arguments */
    CHECK_ARG_NOT_NULL(data);

    /* Pack the request into the control-protocol wire format */
    common_status = CONTROL_PROTOCOL__pack_sensor_get_config_request(&request, &request_size, device.get_control_sequence(), section_index, offset, data_length);

    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* The firmware reports how many bytes it actually returned (network byte order) */
    sensor_get_config_response = (CONTROL_PROTOCOL__sensor_get_config_response_t *)(payload->parameters);
    actual_read_data_length = BYTE_ORDER__ntohl(sensor_get_config_response->data_length);
    if (data_length != actual_read_data_length) {
        status = HAILO_INVALID_CONTROL_RESPONSE;
        LOGGER__ERROR("Did not read all data from control response");
        goto exit;
    }
    (void)memcpy(data, &sensor_get_config_response->data[0], actual_read_data_length);

    status = HAILO_SUCCESS;
exit:
    return status;
}
-
/* Reads the sensor-config sections info table from the firmware into `data`.
 * NOTE(review): no buffer size is passed — `data` is assumed large enough for
 * the response's reported data_length (bounded by the control response size);
 * confirm callers allocate accordingly. Fails if the firmware reports zero bytes. */
hailo_status Control::sensor_get_sections_info(Device &device, uint8_t *data)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    uint32_t actual_read_data_length = 0;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;
    CONTROL_PROTOCOL__sensor_get_sections_info_response_t *get_sections_info_response = NULL;

    /* Validate arguments */
    CHECK_ARG_NOT_NULL(data);

    /* Pack the request into the control-protocol wire format */
    common_status = CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(&request, &request_size, device.get_control_sequence());

    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    if (HAILO_SUCCESS != status) {
        goto exit;
    }

    get_sections_info_response = (CONTROL_PROTOCOL__sensor_get_sections_info_response_t *)(payload->parameters);

    /* A zero-length payload is treated as an invalid response */
    actual_read_data_length = BYTE_ORDER__ntohl(get_sections_info_response->data_length);
    if (0 == actual_read_data_length) {
        status = HAILO_INVALID_CONTROL_RESPONSE;
        LOGGER__ERROR("Did not read all data from control response");
        goto exit;
    }
    (void)memcpy(data, &get_sections_info_response->data[0], actual_read_data_length);
    status = HAILO_SUCCESS;
exit:
    return status;
}
-
-hailo_status Control::context_switch_set_network_group_header(Device &device,
- const CONTROL_PROTOCOL__application_header_t &network_group_header)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(&request, &request_size,
- device.get_control_sequence(), &network_group_header);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::context_switch_set_context_info_chunk(Device &device,
- const CONTROL_PROTOCOL__context_switch_context_info_single_control_t &context_info)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(&request, &request_size, device.get_control_sequence(),
- &context_info);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- /* In case of max memory error, add LOGGER ERROR, and set indicative error to the user */
- CHECK((CONTEXT_SWITCH_TASK_STATUS_ADD_TRIGGER_FUNCTION_REACHED_FORBIDDEN_MEMORY_SPACE != header->status.major_status),
- HAILO_OUT_OF_FW_MEMORY,
- "Configfured network groups Reached maximum device internal memory. please consider using less network groups.");
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::context_switch_set_context_info(Device &device,
- const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &context_infos)
-{
- for (const auto &context_info : context_infos) {
- auto status = context_switch_set_context_info_chunk(device, context_info);
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
-}
-
/* Reads the accumulated idle-time measurement (in nanoseconds) from the firmware
 * into `measurement`. Requires idle-time measurement to have been enabled via
 * idle_time_set_measurement. */
hailo_status Control::idle_time_get_measurement(Device &device, uint64_t *measurement)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;
    CONTROL_PROTOCOL__idle_time_get_measurement_response_t *idle_time_get_measurement_response = NULL;

    /* Validate arguments */
    CHECK_ARG_NOT_NULL(measurement);

    /* Pack the request into the control-protocol wire format */
    common_status = CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(&request, &request_size, device.get_control_sequence());
    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request with status {:#X}", common_status);
        goto exit;
    }

    /* Send the request to the firmware and receive its response */
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("failed idle_time_get_measurement control with status {}", status);
        goto exit;
    }

    /* Parse response */
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("failed validating idle_time_get_measurement control response with status {}", status);
        goto exit;
    }

    idle_time_get_measurement_response = (CONTROL_PROTOCOL__idle_time_get_measurement_response_t *)(payload->parameters);

    /*copy the measurement*/
    *measurement = BYTE_ORDER__ntohll(idle_time_get_measurement_response->idle_time_ns);

    /* NOTE(review): the "low"/"high" split below reads the 64-bit value via uint32_t
     * pointers, which assumes little-endian host layout - debug-only output. */
    LOGGER__DEBUG("Received idle measurement low: {:#X} ns",
        *((uint32_t *) measurement));
    LOGGER__DEBUG("Received idle measurement high: {:#X} ns",
        *(((uint32_t *) measurement) + 1));

    status = HAILO_SUCCESS;
exit:
    return status;
}
-
-hailo_status Control::idle_time_set_measurement(Device &device, uint8_t measurement_enable)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- LOGGER__DEBUG("Sending idle_time_set_measurement with parameter {}", measurement_enable);
-
- common_status = CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(&request, &request_size, device.get_control_sequence(), measurement_enable);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request with status {:#X}", common_status);
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed idle_time_set_measurement control with status {}", status);
- goto exit;
- }
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
/* Enables or disables handling of Ethernet RX pause frames on the device.
 * rx_pause_frames_enable - non-zero to enable, zero to disable. */
hailo_status Control::set_pause_frames(Device &device, uint8_t rx_pause_frames_enable)
{

    LOGGER__DEBUG("Sending set_pause_frames with parameter {}", rx_pause_frames_enable);

    /* Pack the request into the control-protocol wire format */
    CONTROL_PROTOCOL__request_t request = {};
    size_t request_size = 0;
    HAILO_COMMON_STATUS_t common_status = CONTROL_PROTOCOL__pack_set_pause_frames_request(&request, &request_size,
        device.get_control_sequence(), rx_pause_frames_enable);
    hailo_status status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
    CHECK_SUCCESS(status);

    /* Send the request to the firmware and receive its response */
    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
    CHECK_SUCCESS(status);

    /* Parse response */
    CONTROL_PROTOCOL__response_header_t *header = NULL;
    CONTROL_PROTOCOL__payload_t *payload = NULL;
    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
        &request);
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
-
-hailo_status Control::download_context_action_list_chunk(Device &device, uint32_t network_group_id,
- CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index,
- uint16_t action_list_offset, size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list,
- uint16_t *action_list_length, bool *is_action_list_end, uint32_t *batch_counter)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__download_context_action_list_response_t *context_action_list_response = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(base_address);
- CHECK_ARG_NOT_NULL(action_list);
- CHECK_ARG_NOT_NULL(action_list_length);
-
- common_status = CONTROL_PROTOCOL__pack_download_context_action_list_request(&request, &request_size, device.get_control_sequence(),
- network_group_id, context_type, context_index, action_list_offset);
-
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- context_action_list_response = (CONTROL_PROTOCOL__download_context_action_list_response_t *)(payload->parameters);
-
- if (0 == BYTE_ORDER__ntohl(context_action_list_response->action_list_length)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Received empty action list");
- goto exit;
- }
- if (0 == BYTE_ORDER__ntohl(context_action_list_response->base_address)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Received NULL pointer to base address");
- goto exit;
- }
-
- if (action_list_max_size < BYTE_ORDER__ntohl(context_action_list_response->action_list_length)) {
- status = HAILO_INVALID_CONTROL_RESPONSE;
- LOGGER__ERROR("Received action list bigger than allocated user buffer");
- }
-
- (void)memcpy(action_list, context_action_list_response->action_list
- ,BYTE_ORDER__ntohl(context_action_list_response->action_list_length));
-
- *action_list_length = (uint16_t)(BYTE_ORDER__ntohl(context_action_list_response->action_list_length));
- *base_address = BYTE_ORDER__ntohl(context_action_list_response->base_address);
- *is_action_list_end = context_action_list_response->is_action_list_end;
- *batch_counter = BYTE_ORDER__ntohl(context_action_list_response->batch_counter);
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
/* Downloads a full action list for a context by repeatedly requesting chunks
 * until the firmware reports the final one. Accumulates the chunks contiguously
 * into `action_list` (capacity `action_list_max_size`), and returns the firmware
 * base address, total length and batch counter through the output pointers. */
hailo_status Control::download_context_action_list(Device &device, uint32_t network_group_id,
    CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, size_t action_list_max_size,
    uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length, uint32_t *batch_counter)
{
    hailo_status status = HAILO_UNINITIALIZED;
    bool is_action_list_end = false;
    uint16_t chunk_action_list_length = 0;
    uint16_t accumulated_action_list_length = 0;
    uint8_t *action_list_current_offset = 0;
    size_t remaining_action_list_max_size = 0;
    uint32_t chunk_base_address = 0;
    uint32_t batch_counter_local = 0;

    /* Validate arguments */
    CHECK_ARG_NOT_NULL(base_address);
    CHECK_ARG_NOT_NULL(action_list);
    CHECK_ARG_NOT_NULL(action_list_length);

    action_list_current_offset = action_list;
    remaining_action_list_max_size = action_list_max_size;

    /* Fetch chunks until the firmware flags the last one; the accumulated length
     * doubles as the next chunk's read offset, and the remaining capacity shrinks
     * by each chunk so the chunk call can bound its copy. */
    do {
        status = download_context_action_list_chunk(device, network_group_id, context_type, context_index,
            accumulated_action_list_length, remaining_action_list_max_size, &chunk_base_address,
            action_list_current_offset, &chunk_action_list_length, &is_action_list_end, &batch_counter_local);
        CHECK_SUCCESS(status);

        accumulated_action_list_length = (uint16_t)(accumulated_action_list_length + chunk_action_list_length);
        action_list_current_offset += chunk_action_list_length;
        remaining_action_list_max_size -= chunk_action_list_length;
    }
    while (!is_action_list_end);

    /* Set output variables (base address taken from the last chunk) */
    *base_address = chunk_base_address;
    *action_list_length = accumulated_action_list_length;
    *batch_counter = batch_counter_local;

    return HAILO_SUCCESS;
}
-
-hailo_status Control::change_context_switch_status(Device &device,
- CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
- uint8_t network_group_index, uint16_t dynamic_batch_size, bool keep_nn_config_during_reset)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_change_context_switch_status_request(&request, &request_size,
- device.get_control_sequence(), state_machine_status, network_group_index, dynamic_batch_size,
- keep_nn_config_during_reset);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::enable_network_group(Device &device, uint8_t network_group_index, uint16_t dynamic_batch_size)
-{
- static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
- return Control::change_context_switch_status(device, CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_ENABLED,
- network_group_index, dynamic_batch_size, REMOVE_NN_CONFIG_DURING_RESET);
-}
-
-hailo_status Control::reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset)
-{
- static const auto IGNORE_NETWORK_GROUP_INDEX = 0;
- static const auto IGNORE_DYNAMIC_BATCH_SIZE = 0;
- return Control::change_context_switch_status(device, CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_RESET,
- IGNORE_NETWORK_GROUP_INDEX, IGNORE_DYNAMIC_BATCH_SIZE, keep_nn_config_during_reset);
-}
-
-hailo_status Control::wd_enable(Device &device, uint8_t cpu_id, bool should_enable)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- LOGGER__DEBUG("Sending wd_enable with parameters cpu_id: {}, should_enable: {}", cpu_id, should_enable);
-
- common_status = CONTROL_PROTOCOL__pack_wd_enable(&request, &request_size, device.get_control_sequence(), cpu_id, should_enable);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_wd_enable with status {:#X}", common_status);
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed wd_enable control with status {}", status);
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-hailo_status Control::wd_config(Device &device, uint8_t cpu_id, uint32_t wd_cycles, CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- LOGGER__DEBUG("Sending wd_config with parameters cpu_id: {}, wd_cycles: {} wd_mode {}", cpu_id, wd_cycles, wd_mode);
-
- common_status = CONTROL_PROTOCOL__pack_wd_config(&request, &request_size, device.get_control_sequence(), cpu_id, wd_cycles, wd_mode);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_wd_config with status {:#X}", common_status);
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed wd_config control with status {}", status);
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::previous_system_state(Device &device, uint8_t cpu_id, CONTROL_PROTOCOL__system_state_t *system)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__previous_system_state_response_t *previous_system_state_response = NULL;
-
- LOGGER__DEBUG("Sending previous system state with parameter cpu_id: {}", cpu_id);
-
- CHECK_ARG_NOT_NULL(system);
-
- common_status = CONTROL_PROTOCOL__pack_previous_system_state(&request, &request_size, device.get_control_sequence(), cpu_id);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_previous_system_state with status {:#X}", common_status);
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed previous_system_state control with status {}", status);
- goto exit;
- }
-
- previous_system_state_response = (CONTROL_PROTOCOL__previous_system_state_response_t *)(payload->parameters);
-
- /*copy the measurement*/
- *system = BYTE_ORDER__ntohl(previous_system_state_response->system_state);
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::set_dataflow_interrupt(Device &device, uint8_t interrupt_type, uint8_t interrupt_index,
- uint8_t interrupt_sub_index)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(&request, &request_size, device.get_control_sequence(),
- interrupt_type, interrupt_index, interrupt_sub_index);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::d2h_notification_manager_set_host_info(Device &device, uint16_t host_port, uint32_t host_ip_address)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- auto connection_type = ((Device::Type::PCIE == device.get_type() || Device::Type::CORE == device.get_type()) ?
- D2H_EVENT_COMMUNICATION_TYPE_VDMA : D2H_EVENT_COMMUNICATION_TYPE_UDP);
-
- LOGGER__DEBUG("Set d2h notification manager new host info : connection_type {}, remote_port {}, remote_ip_address {}", connection_type, host_port, host_ip_address);
-
- common_status = CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request(&request, &request_size, device.get_control_sequence(),
- static_cast<uint8_t>(connection_type), host_port, host_ip_address);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::d2h_notification_manager_send_host_info_notification(Device &device, uint8_t notification_priority)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- LOGGER__DEBUG("Send d2h host notification with priority {}", notification_priority);
-
- common_status = CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request(&request, &request_size, device.get_control_sequence(), notification_priority);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-
-hailo_status Control::clear_configured_apps(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- LOGGER__DEBUG("Sending clear_configured_apps");
-
- common_status = CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(&request, &request_size,
- device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request with status {:#X}",
- common_status);
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("failed clear_configured_apps control with status {}", status);
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::get_chip_temperature(Device &device, hailo_chip_temperature_info_t *temp_info)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_chip_temperature_response_t* temps = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_get_chip_temperature_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- temps = (CONTROL_PROTOCOL__get_chip_temperature_response_t *)(payload->parameters);
- temp_info->sample_count = BYTE_ORDER__ntohs(temps->info.sample_count);
- temp_info->ts0_temperature = temps->info.ts0_temperature;
- temp_info->ts1_temperature = temps->info.ts1_temperature;
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::enable_debugging(Device &device, bool is_rma)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_enable_debugging_request(&request, &request_size, device.get_control_sequence(), is_rma);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-Expected<CONTROL_PROTOCOL__get_extended_device_information_response_t> Control::get_extended_device_info_response(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Validate arguments */
-
- common_status = CONTROL_PROTOCOL__pack_get_extended_device_information_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return std::move(*(CONTROL_PROTOCOL__get_extended_device_information_response_t *)(payload->parameters));
-}
-
-Expected<uint32_t> Control::get_partial_clusters_layout_bitmap(Device &device)
-{
- auto device_arch_exp = device.get_architecture();
- CHECK_EXPECTED(device_arch_exp);
- if (HAILO_ARCH_HAILO8L != device_arch_exp.value()) {
- // Partial clusters layout is only relevant in HAILO_ARCH_HAILO8L arch
- return Expected<uint32_t>(PARTIAL_CLUSTERS_LAYOUT_IGNORE);
- }
- auto extended_device_info_response = get_extended_device_info_response(device);
- CHECK_EXPECTED(extended_device_info_response);
- return BYTE_ORDER__ntohl(extended_device_info_response->partial_clusters_layout_bitmap);
-}
-
-Expected<hailo_extended_device_information_t> Control::get_extended_device_information(Device &device)
-{
- auto extended_device_info_response = get_extended_device_info_response(device);
- CHECK_EXPECTED(extended_device_info_response);
- return control__parse_get_extended_device_information_results(extended_device_info_response.value());
-}
-
-Expected<hailo_health_info_t> Control::get_health_information(Device &device)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_health_information_response_t *get_health_information_response = NULL;
-
- /* Validate arguments */
-
- common_status = CONTROL_PROTOCOL__pack_get_health_information_request(&request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- get_health_information_response = (CONTROL_PROTOCOL__get_health_information_response_t *)(payload->parameters);
-
- return control__parse_get_health_information_results(get_health_information_response);
-}
-
-hailo_status Control::config_context_switch_breakpoint(Device &device, uint8_t breakpoint_id,
- CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
- CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- common_status = CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
- &request, &request_size, device.get_control_sequence(), breakpoint_id, breakpoint_control, breakpoint_data);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::get_context_switch_breakpoint_status(Device &device, uint8_t breakpoint_id,
- CONTROL_PROTOCOL__context_switch_debug_sys_status_t *breakpoint_status)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_context_switch_breakpoint_status_response_t *get_context_switch_breakpoint_status_response = NULL;
-
- RETURN_IF_ARG_NULL(breakpoint_status);
-
- common_status = CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
- &request, &request_size, device.get_control_sequence(), breakpoint_id);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- get_context_switch_breakpoint_status_response =
- (CONTROL_PROTOCOL__get_context_switch_breakpoint_status_response_t *)(payload->parameters);
-
- memcpy(breakpoint_status,
- &(get_context_switch_breakpoint_status_response->breakpoint_status),
- sizeof((*breakpoint_status)));
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::get_context_switch_main_header(Device &device, CONTROL_PROTOCOL__context_switch_main_header_t *main_header)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- CONTROL_PROTOCOL__get_context_switch_main_header_response_t *get_context_switch_main_header_response = NULL;
-
- RETURN_IF_ARG_NULL(main_header);
-
- common_status = CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
- &request, &request_size, device.get_control_sequence());
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- if (HAILO_SUCCESS != status) {
- goto exit;
- }
-
- get_context_switch_main_header_response =
- (CONTROL_PROTOCOL__get_context_switch_main_header_response_t *)(payload->parameters);
-
- memcpy(main_header,
- &(get_context_switch_main_header_response->main_header),
- sizeof((*main_header)));
-
- status = HAILO_SUCCESS;
-exit:
- return status;
-}
-
-hailo_status Control::config_context_switch_timestamp(Device &device, uint16_t batch_index, bool enable_user_configuration)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- auto common_status = CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
- &request, &request_size, device.get_control_sequence(), batch_index, enable_user_configuration);
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::test_chip_memories(Device &device)
-{
- uint32_t top_bypass_bitmap = 0;
- hailo_status status = HAILO_UNINITIALIZED;
-
- /*cluster bypass and index are irrelevant for top*/
- uint32_t cluster_bypass_bitmap_0 = 0;
- uint32_t cluster_bypass_bitmap_1 = 0;
-
- for (size_t mem_block = 0; mem_block < CONTROL_PROTOCOL__TOP_NUM_MEM_BLOCKS; mem_block++) {
- /*only run test on allowed blocks */
- if (0 == (CONTROL_PROTOCOL__BIST_TOP_WHITELIST & (1 << mem_block))) {
- continue;
- }
- top_bypass_bitmap = CONTROL_PROTOCOL__BIST_TOP_BYPASS_ALL_MASK ^ (1 << mem_block);
- auto block_status = run_bist_test(device, true, top_bypass_bitmap, 0, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
- if (HAILO_SUCCESS != block_status) {
- LOGGER__ERROR("bist test failed on memory block {}", mem_block);
- status = block_status;
- }
- }
-
- for (uint8_t cluster_index = 0; cluster_index < CONTROL_PROTOCOL_NUM_BIST_CLUSTER_STEPS; cluster_index++) {
- /*top bypass irrelevant for clusters*/
- top_bypass_bitmap = 0;
- /*run on all memory blocks, bypass = 0*/
- cluster_bypass_bitmap_0 = 0;
- cluster_bypass_bitmap_1 = 0;
- auto cluster_status = run_bist_test(device, false, top_bypass_bitmap, cluster_index, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
- if (HAILO_SUCCESS != cluster_status) {
- LOGGER__ERROR("bist test failed on cluster block {}", cluster_index);
- status = cluster_status;
- }
- }
-
- /*No errors encountered*/
- if (HAILO_UNINITIALIZED == status){
- status = HAILO_SUCCESS;
- }
-
- return status;
-}
-
-hailo_status Control::run_bist_test(Device &device, bool is_top_test, uint32_t top_bypass_bitmap,
- uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- auto common_status = CONTROL_PROTOCOL__pack_run_bist_test_request(
- &request, &request_size, device.get_control_sequence(),
- is_top_test, top_bypass_bitmap, cluster_index, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Control::set_sleep_state(Device &device, hailo_sleep_state_t sleep_state)
-{
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = sizeof(response_buffer);
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- auto common_status = CONTROL_PROTOCOL__pack_set_sleep_state_request(
- &request, &request_size, device.get_control_sequence(), static_cast<uint8_t>(sleep_state));
- auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- CHECK_SUCCESS(status);
-
- /* Parse response */
- status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
- &request);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file control.hpp
- * @brief Contains Defines and declarations related to controlling hailo8
- **/
-
-#ifndef __CONTROL_H__
-#define __CONTROL_H__
-
-#include <stdbool.h>
-
-#include <hailo/hailort.h>
-#include "hailo/device.hpp"
-#include "control_protocol.h"
-#include "control_protocol.hpp"
-
-namespace hailort
-{
-
-#define CONTROL__MAX_SEQUENCE (0xFFFFFFFF)
-#define CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE (1024)
-
-#define FW_MAGIC (0x1DD89DE0)
-#define FW_SUPPORTED_HEADER_VERSION (0)
-#define BOARD_CONFIG_SIZE (500)
-
-/* TODO: Is this the correct size? */
-#define RESPONSE_MAX_BUFFER_SIZE (2048)
-#define WRITE_CHUNK_SIZE (1024)
-#define WORD_SIZE (4)
-
-
-class Control final
-{
-public:
- Control() = delete;
-
- static hailo_status parse_and_validate_response(uint8_t *message, uint32_t message_size,
- CONTROL_PROTOCOL__response_header_t **header, CONTROL_PROTOCOL__payload_t **payload,
- CONTROL_PROTOCOL__request_t *request);
-
- /**
- * Receive information about the device.
- *
- * @param[in] device - The Hailo device.
- * @return The information about the board.
- */
- static Expected<hailo_device_identity_t> identify(Device &device);
-
-
- /**
- * Receive extended information about the device.
- *
- * @param[in] device - The Hailo device.
- * @return The extended information about the board.
- */
- static Expected<hailo_extended_device_information_t> get_extended_device_information(Device &device);
-
- /**
- * Receive information about the core cpu.
- *
- * @param[in] device - The Hailo device.
- * @param[out] core_info - The information about the core cpu.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status core_identify(Device &device, hailo_core_information_t *core_info);
-
- /**
- * Configure a UDP input dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_udp_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Configure a UDP output dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_udp_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Configure a MIPI input dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_mipi_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Configure a MIPI output dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_mipi_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Configure a PCIe input dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_pcie_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Configure a PCIe output dataflow stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] params - The stream params that would be configured.
- * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status config_stream_pcie_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
-
- /**
- * Open a stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] dataflow_manager_id - Unique id of the dataflow manager.
- * @param[in] is_input - Indicates whether the stream is an input or an output.
- * @note The stream must be configured prior its opening;
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status open_stream(Device &device, uint8_t dataflow_manager_id, bool is_input);
-
- /**
- * Close a stream at a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] dataflow_manager_id - Unique id of the dataflow manager.
- * @param[in] is_input - Indicates whether the stream is an input or an output.
- * @note
- * 1. A stream must be opened before closing.
- * 2. A stream cannot be closed twice.
- * 3. In order to close all the streams, call \ref close_all_streams.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status close_stream(Device &device, uint8_t dataflow_manager_id, bool is_input);
- static hailo_status close_all_streams(Device &device);
-
- /**
- * Get idle time accumulated measurement.
- *
- * @param[in] device - The Hailo device.
- * @param[out] measurement - pointer to store the measurement
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status idle_time_get_measurement(Device &device, uint64_t *measurement);
-
- /**
- * start/stop idle time measurement
- *
- * @param[in] device - The Hailo device.
- * @param[in] measurement_enable - start/stop the measurement
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status idle_time_set_measurement(Device &device, uint8_t measurement_enable);
-
- /**
- * Start firmware update of a Hailo device.
- *
- * @param[in] device - The Hailo device.
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status start_firmware_update(Device &device);
- static hailo_status write_firmware_update(Device &device, uint32_t offset, const uint8_t *data, uint32_t data_length);
- static hailo_status validate_firmware_update(Device &device, MD5_SUM_t *expected_md5, uint32_t firmware_size);
- static hailo_status finish_firmware_update(Device &device);
- static hailo_status write_second_stage_to_internal_memory(Device &device, uint32_t offset, uint8_t *data, uint32_t data_length);
- static hailo_status copy_second_stage_to_flash(Device &device, MD5_SUM_t *expected_md5, uint32_t second_stage_size);
-
- static hailo_status examine_user_config(Device &device, hailo_fw_user_config_information_t *info);
-
- static hailo_status read_user_config(Device &device, uint8_t *buffer, uint32_t buffer_length);
-
- static hailo_status write_user_config(Device &device, const uint8_t *data, uint32_t data_length);
-
- static hailo_status erase_user_config(Device &device);
-
- static hailo_status read_board_config(Device &device, uint8_t *buffer, uint32_t buffer_length);
-
- static hailo_status write_board_config(Device &device, const uint8_t *data, uint32_t data_length);
-
- static hailo_status phy_operation(Device &device, CONTROL_PROTOCOL__phy_operation_t operation_type);
-
- static hailo_status config_core_top(Device &device, CONTROL_PROTOCOL__config_core_top_type_t config_type,
- CONTROL_PROTOCOL__config_core_top_params_t *params);
-
- /**
- * Write data to an I2C slave over a hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] slave_config - The configuration of the slave.
- * @param[in] register_address - The address of the register to which the data will be written
- * @param[in] data - A pointer to a buffer that contains the data to be written to the slave.
- * @param[in] length - The size of @a data in bytes.
- * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
- */
- static hailo_status i2c_write(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
- const uint8_t *data, uint32_t length);
-
- /**
- * Read data from an I2C slave over a hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] slave_config - The configuration of the slave.
- * @param[in] register_address - The address of the register from which the data will be read.
- * @param[in] data - Pointer to a buffer that would store the read data.
- * @param[in] length - The number of bytes to read into the buffer pointed by @a data.
- * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
- */
- static hailo_status i2c_read(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
- uint8_t *data, uint32_t length);
-
- /**
- * Measure the latency of a single image at the nn core of a hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[in] latency_measurement_en - Boolean if the latency should be enabled or not.
- * @param[in] inbound_start_buffer_number - The inbound buffer from which the system start the latency measurement.
- * @param[in] outbound_start_buffer_number - The outbound buffer from which the system ends the latency measurement.
- * @param[in] inbound_stream_index - Which input stream to measure latency from.
- * @param[in] outbound_stream_index - Which output stream to measure latency from.
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status latency_measurement_config(Device &device, uint8_t latency_measurement_en,
-
- uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index,
- uint32_t outbound_stream_index);
- /**
- * Read the measurement of the latency of a single image at the nn core of a hailo device.
- *
- * @param[in] device - The Hailo device.
- * @param[out] inbound_to_outbound_latency_nsec - The latency in nanoseconds.
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status latency_measurement_read(Device &device, uint32_t *inbound_to_outbound_latency_nsec);
- static hailo_status sensor_store_config(Device &device, uint32_t is_first, uint32_t section_index, uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size,
- uint8_t *data, uint32_t data_length, uint16_t config_height, uint16_t config_width, uint16_t config_fps, uint32_t config_name_length, uint8_t *config_name);
- static hailo_status sensor_get_config(Device &device, uint32_t section_index, uint32_t offset, uint32_t data_length, uint8_t *data);
- static hailo_status sensor_set_i2c_bus_index(Device &device, uint32_t sensor_type, uint32_t bus_index);
- static hailo_status sensor_load_and_start_config(Device &device, uint32_t section_index);
- static hailo_status sensor_reset(Device &device, uint32_t section_index);
- static hailo_status sensor_set_generic_i2c_slave(Device &device, uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness);
- static hailo_status sensor_get_sections_info(Device &device, uint8_t *data);
-
- /**
- * Download generated context switch action list per single context
- *
- * @param[in] device - The Hailo device.
- * @param[in] network_group_id - Unique identifier for the network group.
- * @param[in] context_type - type of context
- * @param[in] context_index - context index of the context the user wishes to download the action list. Should
- * be 0 for non-dynamic contexts.
- * @param[out] base address - base address of the context action list in the FW memory
- * @param[out] action list - buffer of the action list
- * @param[out] action_list_length - size of the action list buffer
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- // TODO: fix
- static hailo_status download_context_action_list(Device &device, uint32_t network_group_id,
- CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index,
- size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length,
- uint32_t *batch_counter);
-
- /**
- * Enable network group
- *
- * @param[in] device - The Hailo device.
- * @param[in] network_group_index - network_group index
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status enable_network_group(Device &device, uint8_t network_group_index, uint16_t dynamic_batch_size);
- /**
- * reset context switch state machine
- *
- * @param[in] device - The Hailo device.
- * @param[in] keep_nn_config_during_reset -
- * Use if in the reset flow, user wise to remain in the same network group.
- * this reset flow keep most of the configuration on the network group for faster batch switching.
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset);
- /**
- * set dataflow interrupt by control
- *
- * @param[in] device - The Hailo device.
- * @param[in] interrupt_type - casted from enum into unit8_t - type of the interrupt
- * @param[in] interrupt_index - interrupt index (PCIe channel or Cluster index)
- * @param[in] interrupt_sub_index - interrupt index (LCU index in cluster)
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status set_dataflow_interrupt(Device &device, uint8_t interrupt_type, uint8_t interrupt_index,
- uint8_t interrupt_sub_index);
-
- /**
- * set d2h manager a new host configuration by control
- *
- * @param[in] device - The Hailo device.
- * @param[in] host_port - host port in case connection_type is Ethernet, otherwise neglected.
- * @param[in] host_ip_address - host ip in case connection_type is Ethernet, otherwise neglected,
- * 0 means auto detect IP address from control.
- *
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status d2h_notification_manager_set_host_info(Device &device, uint16_t host_port, uint32_t host_ip_address);
- static hailo_status d2h_notification_manager_send_host_info_notification(Device &device, uint8_t notification_priority);
-
- /**
- * Enable/disable halt transmition following Rx pause frame
- *
- * @param[in] device - The Hailo device.
- * @param[in] rx_pause_frames_enable - Bool indicating weather to enable or disable rx pause frames
- * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
- */
- static hailo_status set_pause_frames(Device &device, uint8_t rx_pause_frames_enable);
-
- static hailo_status set_fw_logger(Device &device, hailo_fw_logger_level_t level, uint32_t interface_mask);
- static hailo_status write_memory(Device &device, uint32_t address, const uint8_t *data, uint32_t data_length);
- static hailo_status read_memory(Device &device, uint32_t address, uint8_t *data, uint32_t data_length);
- static hailo_status context_switch_set_context_info(Device &device,
- const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &context_infos);
- static hailo_status context_switch_set_network_group_header(Device &device,
- const CONTROL_PROTOCOL__application_header_t &network_group_header);
- static hailo_status wd_enable(Device &device, uint8_t cpu_id, bool should_enable);
- static hailo_status wd_config(Device &device, uint8_t cpu_id, uint32_t wd_cycles, CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode);
- static hailo_status previous_system_state(Device &device, uint8_t cpu_id, CONTROL_PROTOCOL__system_state_t *system_state);
- static hailo_status clear_configured_apps(Device &device);
- static hailo_status get_chip_temperature(Device &device, hailo_chip_temperature_info_t *temp_info);
- static hailo_status enable_debugging(Device &device, bool is_rma);
-
- static hailo_status config_context_switch_breakpoint(Device &device, uint8_t breakpoint_id,
- CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
- CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data);
- static hailo_status get_context_switch_breakpoint_status(Device &device, uint8_t breakpoint_id,
- CONTROL_PROTOCOL__context_switch_debug_sys_status_t *breakpoint_status);
- static hailo_status get_context_switch_main_header(Device &device,
- CONTROL_PROTOCOL__context_switch_main_header_t *main_header);
- static hailo_status config_context_switch_timestamp(Device &device, uint16_t batch_index, bool enable_user_configuration);
- static hailo_status test_chip_memories(Device &device);
- static hailo_status run_bist_test(Device &device, bool is_top_test, uint32_t top_bypass_bitmap,
- uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1);
- static hailo_status set_clock_freq(Device &device, uint32_t clock_freq);
- static Expected<hailo_health_info_t> get_health_information(Device &device);
- static hailo_status set_throttling_state(Device &device, bool should_activate);
- static Expected<bool> get_throttling_state(Device &device);
- static hailo_status set_overcurrent_state(Device &device, bool should_activate);
- static Expected<bool> get_overcurrent_state(Device &device);
- static Expected<CONTROL_PROTOCOL__hw_consts_t> get_hw_consts(Device &device);
- static hailo_status set_sleep_state(Device &device, hailo_sleep_state_t sleep_state);
-
- // TODO: needed?
- static hailo_status power_measurement(Device &device, CONTROL_PROTOCOL__dvm_options_t dvm,
- CONTROL_PROTOCOL__power_measurement_types_t measurement_type, float32_t *measurement);
- static hailo_status set_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, CONTROL_PROTOCOL__dvm_options_t dvm,
- CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
- static hailo_status get_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, bool should_clear,
- hailo_power_measurement_data_t *measurement_data);
- static hailo_status start_power_measurement(Device &device,
- CONTROL_PROTOCOL__averaging_factor_t averaging_factor, CONTROL_PROTOCOL__sampling_period_t sampling_period);
- static hailo_status stop_power_measurement(Device &device);
-
- static Expected<uint32_t> get_partial_clusters_layout_bitmap(Device &device);
-
-private:
- static hailo_status write_memory_chunk(Device &device, uint32_t address, const uint8_t *data, uint32_t chunk_size);
- static hailo_status read_memory_chunk(Device &device, uint32_t address, uint8_t *data, uint32_t chunk_size);
- static hailo_status read_user_config_chunk(Device &device, uint32_t read_offset, uint32_t read_length,
- uint8_t *buffer, uint32_t *actual_read_data_length);
- static hailo_status write_user_config_chunk(Device &device, uint32_t offset, const uint8_t *data, uint32_t chunk_size);
- static hailo_status download_context_action_list_chunk(Device &device, uint32_t network_group_id,
- CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset,
- size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length,
- bool *is_action_list_end, uint32_t *batch_counter);
- static hailo_status context_switch_set_context_info_chunk(Device &device,
- const CONTROL_PROTOCOL__context_switch_context_info_single_control_t &context_info);
- static hailo_status change_context_switch_status(Device &device,
- CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
- uint8_t network_group_index, uint16_t dynamic_batch_size, bool keep_nn_config_during_reset);
- static Expected<CONTROL_PROTOCOL__get_extended_device_information_response_t> get_extended_device_info_response(Device &device);
-};
-
-} /* namespace hailort */
-
-#endif /* __CONTROL_H__ */
+++ /dev/null
-/*
- * =============================================================================
- *
- * HAILO
- *
- * Property of HAILO Tech
- * For Unrestricted Internal Use Only
- * Unauthorized reproduction and/or distribution is strictly prohibited.
- * This product is protected under copyright law and trade secret law
- * Created 2018, (C) Copyright 2018 Hailo Tech . All rights reserved.
- * as an unpublished work.
- */
-/**
-* Filename: control_protocol.c
-*
-* Description: Implements control protocol packing/unpacking.
-*
-*=============================================================================*/
-
-#include <stdint.h>
-#include <string.h>
-#include "control_protocol.h"
-#include "control_protocol.hpp"
-#include "byte_order.h"
-#include "status.h"
-#include "common/utils.hpp"
-
-using namespace hailort;
-
-#ifndef FIRMWARE_ARCH /*this file should not be compiled for firmware*/
-
-bool g_CONTROL_PROTOCOL__is_critical[HAILO_CONTROL_OPCODE_COUNT] = {
-#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) is_critical,
- CONTROL_PROTOCOL__OPCODES_VARIABLES
-#undef CONTROL_PROTOCOL__OPCODE_X
-};
-
-CPU_ID_t g_CONTROL_PROTOCOL__cpu_id[HAILO_CONTROL_OPCODE_COUNT] = {
-#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) cpu_id,
- CONTROL_PROTOCOL__OPCODES_VARIABLES
-#undef CONTROL_PROTOCOL__OPCODE_X
-};
-
-const char *CONTROL_PROTOCOL__textual_format[] =
-{
-#define STRINGIFY(name) #name
-#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) STRINGIFY(name),
- CONTROL_PROTOCOL__OPCODES_VARIABLES
-#undef CONTROL_PROTOCOL__OPCODE_X
-};
-
-const char *CONTROL_PROTOCOL__get_textual_opcode(CONTROL_PROTOCOL__OPCODE_t opcode)
-{
- return CONTROL_PROTOCOL__textual_format[opcode];
-}
-
-
-/* Functions declarations */
-HAILO_COMMON_STATUS_t control_protocol__parse_message(uint8_t *message,
- uint32_t message_size,
- CONTROL_PROTOCOL__common_header_t **header,
- uint16_t full_header_size,
- CONTROL_PROTOCOL__payload_t **payload,
- uint8_t expected_ack_value);
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__parse_response(uint8_t *message,
- uint32_t message_size,
- CONTROL_PROTOCOL__response_header_t **header,
- CONTROL_PROTOCOL__payload_t **payload,
- CONTROL_PROTOCOL__status_t *fw_status)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- if ((NULL == message) || (NULL == header) || (NULL == payload) || (NULL == fw_status)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- status = control_protocol__parse_message(message,
- message_size,
- (CONTROL_PROTOCOL__common_header_t**)header,
- sizeof(**header),
- payload,
- CONTROL_PROTOCOL__ACK_SET);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- /* Copy firmware status from header */
- fw_status->major_status = BYTE_ORDER__ntohl((*header)->status.major_status);
- fw_status->minor_status = BYTE_ORDER__ntohl((*header)->status.minor_status);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t control_protocol__parse_message(uint8_t *message,
- uint32_t message_size,
- CONTROL_PROTOCOL__common_header_t **header,
- uint16_t full_header_size,
- CONTROL_PROTOCOL__payload_t **payload,
- uint8_t expected_ack_value)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t current_offset = 0;
- CONTROL_PROTOCOL__parameter_t *current_parameter = NULL;
- uint32_t parameter_count = 0;
- CONTROL_PROTOCOL_flags_t control_flags = {};
- CONTROL_PROTOCOL__common_header_t *local_common_header = NULL;
- CONTROL_PROTOCOL__payload_t *local_payload = NULL;
- uint32_t protocol_version = 0;
-
- local_common_header = (CONTROL_PROTOCOL__common_header_t *)(message);
- protocol_version = BYTE_ORDER__ntohl(local_common_header->version);
-
- switch (protocol_version) {
- case CONTROL_PROTOCOL__PROTOCOL_VERSION_2:
- break;
- default:
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_VERSION;
- goto exit;
- break;
- }
-
- control_flags.integer = BYTE_ORDER__ntohl(local_common_header->flags.integer);
- if (expected_ack_value != control_flags.bitstruct.ack) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__UNEXPECTED_ACK_VALUE;
- goto exit;
- }
-
- current_offset = full_header_size;
- /* Check if there are any parameters to parse */
- if (current_offset < message_size) {
- local_payload = (CONTROL_PROTOCOL__payload_t *)(message + current_offset);
- current_offset += sizeof(*local_payload);
-
- /* If the are any parameters, start parsing them */
- if (0 < BYTE_ORDER__ntohl(local_payload->parameter_count)) {
- /* Check that the frame doesn't overrun after parameter count */
- if (current_offset > message_size) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__OVERRUN_BEFORE_PARAMETER;
- goto exit;
- }
- /* Validate each parameter */
- for (parameter_count = 0;
- parameter_count < BYTE_ORDER__ntohl(local_payload->parameter_count);
- ++parameter_count) {
- current_parameter = (CONTROL_PROTOCOL__parameter_t *)(
- (message) + current_offset);
- /* Check that the parameter donesn't overrun the packet */
- current_offset += sizeof(*current_parameter) + BYTE_ORDER__ntohl(current_parameter->length);
- if (current_offset > message_size) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__OVERRUN_AT_PARAMETER;
- goto exit;
- }
- }
- }
- }
-
- /* Validate all of the message was parsed */
- if (current_offset != message_size) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__PART_OF_THE_MESSAGE_NOT_PARSED;
- goto exit;
- }
-
- /* Packet is valid, assign out parameters */
- *header = local_common_header;
- local_common_header = NULL;
- *payload = local_payload;
- local_payload = NULL;
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__get_sequence_from_response_buffer(uint8_t *response_buffer,
- size_t response_buffer_size, uint32_t *sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- uint32_t local_sequence = 0;
- CONTROL_PROTOCOL__common_header_t *common_header = NULL;
-
- if ((NULL == response_buffer) || (NULL == sequence)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- if (sizeof(CONTROL_PROTOCOL__common_header_t) > response_buffer_size) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_BUFFER_SIZE;
- goto exit;
- }
-
- /* Get the sequence from the common header */
- common_header = ((CONTROL_PROTOCOL__common_header_t*)(response_buffer));
- local_sequence = BYTE_ORDER__ntohl(common_header->sequence);
-
- *sequence = local_sequence;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-void control_protocol__pack_request_header(CONTROL_PROTOCOL__request_t *request, uint32_t sequence, CONTROL_PROTOCOL__OPCODE_t opcode, uint32_t parameter_count)
-{
- request->header.common_header.opcode = BYTE_ORDER__htonl(opcode);
- request->header.common_header.sequence = BYTE_ORDER__htonl(sequence);
- request->header.common_header.version = BYTE_ORDER__htonl(CONTROL_PROTOCOL__PROTOCOL_VERSION);
-
- request->parameter_count = BYTE_ORDER__htonl(parameter_count);
-}
-
-HAILO_COMMON_STATUS_t control_protocol__pack_empty_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__OPCODE_t opcode)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_request_header(request, sequence, opcode, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_IDENTIFY);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_core_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_CORE_IDENTIFY);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_fw_logger_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- hailo_fw_logger_level_t level, uint8_t interface_mask)
-{
- size_t local_request_size = 0;
-
- CHECK(request != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
- CHECK(request_size != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
-
- CHECK(level <= (uint8_t) CONTROL_PROTOCOL__FW_MAX_LOGGER_LEVEL, HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT);
- CHECK(interface_mask <= CONTROL_PROTOCOL__FW_MAX_LOGGER_INTERFACE, HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT);
-
- static_assert((uint32_t) FW_LOGGER_LEVEL_TRACE == (uint32_t) HAILO_FW_LOGGER_LEVEL_TRACE,
- "mismatch in FW_LOGGER_LEVEL_TRACE and HAILO_FW_LOGGER_LEVEL_TRACE");
- static_assert((uint32_t) FW_LOGGER_LEVEL_DEBUG == (uint32_t) HAILO_FW_LOGGER_LEVEL_DEBUG,
- "mismatch in FW_LOGGER_LEVEL_DEBUG and HAILO_FW_LOGGER_LEVEL_DEBUG");
- static_assert((uint32_t) FW_LOGGER_LEVEL_INFO == (uint32_t) HAILO_FW_LOGGER_LEVEL_INFO,
- "mismatch in FW_LOGGER_LEVEL_INFO and HAILO_FW_LOGGER_LEVEL_INFO");
- static_assert((uint32_t) FW_LOGGER_LEVEL_WARN == (uint32_t) HAILO_FW_LOGGER_LEVEL_WARN,
- "mismatch in FW_LOGGER_LEVEL_WARN and HAILO_FW_LOGGER_LEVEL_WARN");
- static_assert((uint32_t) FW_LOGGER_LEVEL_ERROR == (uint32_t) HAILO_FW_LOGGER_LEVEL_ERROR,
- "mismatch in FW_LOGGER_LEVEL_ERROR and HAILO_FW_LOGGER_LEVEL_ERROR");
- static_assert((uint32_t) FW_LOGGER_LEVEL_FATAL == (uint32_t) HAILO_FW_LOGGER_LEVEL_FATAL,
- "mismatch in FW_LOGGER_LEVEL_FATAL and HAILO_FW_LOGGER_LEVEL_FATAL");
- static_assert((uint32_t)CONTROL_PROTOCOL__INTERFACE_PCIE == (uint32_t)HAILO_FW_LOGGER_INTERFACE_PCIE,
- "mismatch in CONTROL_PROTOCOL__INTERFACE_PCIE and HAILO_FW_LOGGER_INTERFACE_PCIE");
- static_assert((uint32_t)CONTROL_PROTOCOL__INTERFACE_UART == (uint32_t)HAILO_FW_LOGGER_INTERFACE_UART,
- "mismatch in CONTROL_PROTOCOL__INTERFACE_UART and HAILO_FW_LOGGER_INTERFACE_UART");
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_fw_logger_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_FW_LOGGER, 2);
-
- request->parameters.set_fw_logger_request.level_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_fw_logger_request.level));
- request->parameters.set_fw_logger_request.level = static_cast<uint8_t>(level);
-
- request->parameters.set_fw_logger_request.logger_interface_bit_mask_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_fw_logger_request.logger_interface_bit_mask));
- request->parameters.set_fw_logger_request.logger_interface_bit_mask = interface_mask;
-
- *request_size = local_request_size;
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- bool should_activate)
-{
- size_t local_request_size = 0;
-
- CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
- CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_throttling_state_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_THROTTLING_STATE, 1);
-
- request->parameters.set_throttling_state_request.should_activate_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_throttling_state_request.should_activate));
- request->parameters.set_throttling_state_request.should_activate = should_activate;
-
- *request_size = local_request_size;
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_THROTTLING_STATE);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- bool should_activate)
-{
- size_t local_request_size = 0;
-
- CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
- CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_overcurrent_state_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_OVERCURRENT_STATE, 1);
-
- request->parameters.set_overcurrent_state_request.should_activate_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_overcurrent_state_request.should_activate));
- request->parameters.set_overcurrent_state_request.should_activate = should_activate;
-
- *request_size = local_request_size;
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_OVERCURRENT_STATE);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_hw_consts_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_HW_CONSTS);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_clock_freq_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint32_t clock_freq)
-{
- size_t local_request_size = 0;
-
- CHECK(request != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
- CHECK(request_size != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_clock_freq_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_CLOCK_FREQ, 1);
-
- request->parameters.set_clock_freq_request.clock_freq_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_clock_freq_request.clock_freq));
- request->parameters.set_clock_freq_request.clock_freq = BYTE_ORDER__htonl(clock_freq);
-
- *request_size = local_request_size;
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_memory_request_t) + data_length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_MEMORY, 2);
-
- /* Address */
- request->parameters.write_memory_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_memory_request.address));
- request->parameters.write_memory_request.address = BYTE_ORDER__htonl(address);
-
- /* Data */
- request->parameters.write_memory_request.data_length = BYTE_ORDER__htonl(data_length);
- memcpy(&(request->parameters.write_memory_request.data), data, data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_memory_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_MEMORY, 2);
-
- /* Address */
- request->parameters.read_memory_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_memory_request.address));
- request->parameters.read_memory_request.address = BYTE_ORDER__htonl(address);
-
- /* Data count */
- request->parameters.read_memory_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_memory_request.data_count));
- request->parameters.read_memory_request.data_count = BYTE_ORDER__htonl(data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_open_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__open_stream_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_OPEN_STREAM, 2);
-
- /* dataflow_manager_id */
- request->parameters.open_stream_request.dataflow_manager_id_length = BYTE_ORDER__htonl(sizeof(request->parameters.open_stream_request.dataflow_manager_id));
- request->parameters.open_stream_request.dataflow_manager_id = dataflow_manager_id;
-
- /* is_input */
- request->parameters.open_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.open_stream_request.is_input));
- request->parameters.open_stream_request.is_input = is_input;
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_close_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__close_stream_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CLOSE_STREAM, 2);
-
- /* dataflow_manager_id */
- request->parameters.close_stream_request.dataflow_manager_id_length = BYTE_ORDER__htonl(sizeof(request->parameters.close_stream_request.dataflow_manager_id));
- request->parameters.close_stream_request.dataflow_manager_id = dataflow_manager_id;
-
- /* is_input */
- request->parameters.close_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.close_stream_request.is_input));
- request->parameters.close_stream_request.is_input = is_input;
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t control_protocol__pack_config_stream_base_request(CONTROL_PROTOCOL__request_t *request, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- /* stream index */
- request->parameters.config_stream_request.stream_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.stream_index));
- request->parameters.config_stream_request.stream_index = params->stream_index;
-
- /* is_input */
- request->parameters.config_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.is_input));
- request->parameters.config_stream_request.is_input = params->is_input;
-
- /* communication_type */
- request->parameters.config_stream_request.communication_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.communication_type));
- request->parameters.config_stream_request.communication_type = BYTE_ORDER__htonl(params->communication_type);
-
- /* skip_nn_stream_config */
- request->parameters.config_stream_request.skip_nn_stream_config_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.skip_nn_stream_config));
- request->parameters.config_stream_request.skip_nn_stream_config = params->skip_nn_stream_config;
-
- /* power_mode */
- request->parameters.config_stream_request.power_mode_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.power_mode));
- request->parameters.config_stream_request.power_mode = params->power_mode;
-
- /* nn_stream_config */
- request->parameters.config_stream_request.nn_stream_config_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.nn_stream_config));
- request->parameters.config_stream_request.nn_stream_config.core_bytes_per_buffer = BYTE_ORDER__htons(params->nn_stream_config.core_bytes_per_buffer);
- request->parameters.config_stream_request.nn_stream_config.core_buffers_per_frame = BYTE_ORDER__htons(params->nn_stream_config.core_buffers_per_frame);
- request->parameters.config_stream_request.nn_stream_config.periph_bytes_per_buffer = BYTE_ORDER__htons(params->nn_stream_config.periph_bytes_per_buffer);
- request->parameters.config_stream_request.nn_stream_config.periph_buffers_per_frame = BYTE_ORDER__htons(params->nn_stream_config.periph_buffers_per_frame);
- request->parameters.config_stream_request.nn_stream_config.feature_padding_payload = BYTE_ORDER__htons(params->nn_stream_config.feature_padding_payload);
- request->parameters.config_stream_request.nn_stream_config.buffer_padding_payload = BYTE_ORDER__htons(params->nn_stream_config.buffer_padding_payload);
- request->parameters.config_stream_request.nn_stream_config.buffer_padding = BYTE_ORDER__htons(params->nn_stream_config.buffer_padding);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
- goto exit;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__udp_input_config_params_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.udp_input));
- request->parameters.config_stream_request.communication_params.udp_input.listening_port = BYTE_ORDER__htons(params->communication_params.udp_input.listening_port);
-
- request->parameters.config_stream_request.communication_params.udp_input.sync.should_sync = params->communication_params.udp_input.sync.should_sync;
- request->parameters.config_stream_request.communication_params.udp_input.sync.frames_per_sync = BYTE_ORDER__htonl(params->communication_params.udp_input.sync.frames_per_sync);
- request->parameters.config_stream_request.communication_params.udp_input.sync.packets_per_frame = BYTE_ORDER__htonl(params->communication_params.udp_input.sync.packets_per_frame);
- request->parameters.config_stream_request.communication_params.udp_input.sync.sync_size = BYTE_ORDER__htons(params->communication_params.udp_input.sync.sync_size);
-
- request->parameters.config_stream_request.communication_params.udp_input.buffers_threshold = BYTE_ORDER__htonl(params->communication_params.udp_input.buffers_threshold);
- request->parameters.config_stream_request.communication_params.udp_input.use_rtp = params->communication_params.udp_input.use_rtp;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__udp_output_config_params_t);
-
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.udp_output));
- request->parameters.config_stream_request.communication_params.udp_output.host_udp_port = BYTE_ORDER__htons(params->communication_params.udp_output.host_udp_port);
- request->parameters.config_stream_request.communication_params.udp_output.chip_udp_port = BYTE_ORDER__htons(params->communication_params.udp_output.chip_udp_port);
- request->parameters.config_stream_request.communication_params.udp_output.max_udp_payload_size = BYTE_ORDER__htons(params->communication_params.udp_output.max_udp_payload_size);
- request->parameters.config_stream_request.communication_params.udp_output.should_send_sync_packets = params->communication_params.udp_output.should_send_sync_packets;
- request->parameters.config_stream_request.communication_params.udp_output.buffers_threshold = BYTE_ORDER__htonl(params->communication_params.udp_output.buffers_threshold);
- request->parameters.config_stream_request.communication_params.udp_output.use_rtp = params->communication_params.udp_output.use_rtp;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- /* Calculate the size of the exact mipi_input configuration struct instead of the entire union */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__mipi_input_config_params_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.mipi_input));
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.data_type = params->communication_params.mipi_input.common_params.data_type;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.pixels_per_clock = params->communication_params.mipi_input.common_params.pixels_per_clock;
- request->parameters.config_stream_request.communication_params.mipi_input.mipi_rx_id = params->communication_params.mipi_input.mipi_rx_id;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.number_of_lanes = params->communication_params.mipi_input.common_params.number_of_lanes;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.clock_selection = params->communication_params.mipi_input.common_params.clock_selection;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.data_rate = BYTE_ORDER__htonl(params->communication_params.mipi_input.common_params.data_rate);
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.virtual_channel_index = params->communication_params.mipi_input.common_params.virtual_channel_index;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.img_width_pixels = params->communication_params.mipi_input.common_params.img_width_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.common_params.img_height_pixels = params->communication_params.mipi_input.common_params.img_height_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_enable = params->communication_params.mipi_input.isp_params.isp_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_img_in_order = params->communication_params.mipi_input.isp_params.isp_img_in_order;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_img_out_data_type = params->communication_params.mipi_input.isp_params.isp_img_out_data_type;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_enable = params->communication_params.mipi_input.isp_params.isp_crop_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_width_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_width_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_height_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_height_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_width_start_offset_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_width_start_offset_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_height_start_offset_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_height_start_offset_pixels;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_test_pattern_enable = params->communication_params.mipi_input.isp_params.isp_test_pattern_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_configuration_bypass = params->communication_params.mipi_input.isp_params.isp_configuration_bypass;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_ae_enable = params->communication_params.mipi_input.isp_params.isp_run_time_ae_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_awb_enable = params->communication_params.mipi_input.isp_params.isp_run_time_awb_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_adt_enable = params->communication_params.mipi_input.isp_params.isp_run_time_adt_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_af_enable = params->communication_params.mipi_input.isp_params.isp_run_time_af_enable;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_calculations_interval_ms = params->communication_params.mipi_input.isp_params.isp_run_time_calculations_interval_ms;
- request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_light_frequency = params->communication_params.mipi_input.isp_params.isp_light_frequency;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- /* Calculate the size of the exact mipi_output configuration struct instead of the entire union */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__mipi_output_config_params_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.mipi_output));
- request->parameters.config_stream_request.communication_params.mipi_output.fifo_threshold_percent = params->communication_params.mipi_output.fifo_threshold_percent;
- request->parameters.config_stream_request.communication_params.mipi_output.mipi_tx_id = params->communication_params.mipi_output.mipi_tx_id;
- request->parameters.config_stream_request.communication_params.mipi_output.deskew_enable = params->communication_params.mipi_output.deskew_enable;
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.data_rate = BYTE_ORDER__htonl(params->communication_params.mipi_output.common_params.data_rate);
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.clock_selection = params->communication_params.mipi_output.common_params.clock_selection;
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.data_type = params->communication_params.mipi_output.common_params.data_type;
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.number_of_lanes = params->communication_params.mipi_output.common_params.number_of_lanes;
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.pixels_per_clock = params->communication_params.mipi_output.common_params.pixels_per_clock;
- request->parameters.config_stream_request.communication_params.mipi_output.common_params.virtual_channel_index = params->communication_params.mipi_output.common_params.virtual_channel_index;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t)
- - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__pcie_input_config_params_t);
-
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length =
- BYTE_ORDER__htonl(sizeof(params->communication_params.pcie_input));
- request->parameters.config_stream_request.communication_params.pcie_input.pcie_channel_index =
- params->communication_params.pcie_input.pcie_channel_index;
- request->parameters.config_stream_request.communication_params.pcie_input.pcie_dataflow_type =
- params->communication_params.pcie_input.pcie_dataflow_type;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t)
- - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__pcie_output_config_params_t);
-
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
-
- status = control_protocol__pack_config_stream_base_request(request, params);
- if (HAILO_COMMON_STATUS__SUCCESS != status) {
- goto exit;
- }
-
- request->parameters.config_stream_request.communication_params_length =
- BYTE_ORDER__htonl(sizeof(params->communication_params.pcie_output));
- request->parameters.config_stream_request.communication_params.pcie_output.pcie_channel_index =
- params->communication_params.pcie_output.pcie_channel_index;
- request->parameters.config_stream_request.communication_params.pcie_output.desc_page_size =
- params->communication_params.pcie_output.desc_page_size;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__reset_type_t reset_type)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__reset_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_RESET, 1);
-
- /* reset_type */
- request->parameters.reset_resquest.reset_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.reset_resquest.reset_type));
- request->parameters.reset_resquest.reset_type = BYTE_ORDER__htonl((uint32_t)reset_type);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__power_measurement_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_POWER_MEASUEMENT, 2);
-
- /* dvm */
- request->parameters.measure_power_request.dvm_length = BYTE_ORDER__htonl(sizeof(request->parameters.measure_power_request.dvm_length));
- request->parameters.measure_power_request.dvm = BYTE_ORDER__htonl((uint32_t)dvm);
-
-
- /* measurement_type */
- request->parameters.measure_power_request.measurement_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.measure_power_request.measurement_type));
- request->parameters.measure_power_request.measurement_type = BYTE_ORDER__htonl((uint32_t)measurement_type);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_power_measurement_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_POWER_MEASUEMENT, 3);
-
- /* index */
- request->parameters.set_measure_power_request.index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.set_measure_power_request.index));
- request->parameters.set_measure_power_request.index = BYTE_ORDER__htonl(index);
-
- /* dvm */
- request->parameters.set_measure_power_request.dvm_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.set_measure_power_request.dvm));
- request->parameters.set_measure_power_request.dvm = BYTE_ORDER__htonl((uint32_t)dvm);
-
-
- /* measurement_type */
- request->parameters.set_measure_power_request.measurement_type_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.set_measure_power_request.measurement_type));
- request->parameters.set_measure_power_request.measurement_type = BYTE_ORDER__htonl((uint32_t)measurement_type);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, bool should_clear)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__get_power_measurement_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_POWER_MEASUEMENT, 2);
-
- /* index */
- request->parameters.get_measure_power_request.index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.get_measure_power_request.index));
- request->parameters.get_measure_power_request.index = BYTE_ORDER__htonl(index);
-
- /* should_clear */
- request->parameters.get_measure_power_request.should_clear_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.get_measure_power_request.should_clear));
- request->parameters.get_measure_power_request.should_clear = (uint8_t)should_clear;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t delay_milliseconds, CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
- uint16_t local_averaging_factor = 0;
- uint16_t local_sampling_period = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- local_averaging_factor = ((uint16_t)(averaging_factor));
- local_sampling_period = ((uint16_t)(sampling_period));
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__start_power_measurement_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_START_POWER_MEASUEMENT, 3);
-
- /* delay_milliseconds */
- request->parameters.start_measure_power_request.delay_milliseconds_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.start_measure_power_request.delay_milliseconds));
- request->parameters.start_measure_power_request.delay_milliseconds = BYTE_ORDER__htonl(delay_milliseconds);
-
- /* averaging_factor */
- request->parameters.start_measure_power_request.averaging_factor_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.start_measure_power_request.averaging_factor));
- request->parameters.start_measure_power_request.averaging_factor = BYTE_ORDER__htons(local_averaging_factor);
-
- /* sampling_period */
- request->parameters.start_measure_power_request.sampling_period_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.start_measure_power_request.sampling_period));
- request->parameters.start_measure_power_request.sampling_period = BYTE_ORDER__htons(local_sampling_period);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_write_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
- uint32_t sequence, uint32_t register_address, uint8_t endianness, uint16_t slave_address,
- uint8_t register_address_size, uint8_t bus_index, const uint8_t *data, uint32_t length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__i2c_write_request_t) + length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_I2C_WRITE, 7);
-
- /* register_address */
- request->parameters.i2c_write_request.register_address_size = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.register_address));
- request->parameters.i2c_write_request.register_address = BYTE_ORDER__htonl(register_address);
-
- /* endianness */
- request->parameters.i2c_write_request.slave_config.endianness_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.slave_config.endianness));
- request->parameters.i2c_write_request.slave_config.endianness = endianness;
-
- /* slave_address */
- request->parameters.i2c_write_request.slave_config.slave_address_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.slave_config.slave_address));
- request->parameters.i2c_write_request.slave_config.slave_address = BYTE_ORDER__htons(slave_address);
-
- /* register_address_size */
- request->parameters.i2c_write_request.slave_config.register_address_size_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.slave_config.register_address_size));
- request->parameters.i2c_write_request.slave_config.register_address_size = register_address_size;
-
- /* bus_index */
- request->parameters.i2c_write_request.slave_config.bus_index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.slave_config.bus_index));
- request->parameters.i2c_write_request.slave_config.bus_index = bus_index;
-
- /* Data */
- request->parameters.i2c_write_request.data_length = BYTE_ORDER__htonl(length);
- memcpy(&(request->parameters.i2c_write_request.data), data, length);
-
- /* should_hold_bus */
- request->parameters.i2c_write_request.slave_config.should_hold_bus_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_write_request.slave_config.should_hold_bus));
- request->parameters.i2c_write_request.slave_config.should_hold_bus = false;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
- uint32_t sequence, uint32_t register_address, uint8_t endianness,
- uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint32_t length, bool should_hold_bus)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__i2c_read_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_I2C_READ, 7);
-
- /* data_length */
- request->parameters.i2c_read_request.data_length_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.data_length));
- request->parameters.i2c_read_request.data_length = BYTE_ORDER__htonl(length);
-
- /* register_address */
- request->parameters.i2c_read_request.register_address_size = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.register_address));
- request->parameters.i2c_read_request.register_address = BYTE_ORDER__htonl(register_address);
-
- /* endianness */
- request->parameters.i2c_read_request.slave_config.endianness_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.slave_config.endianness));
- request->parameters.i2c_read_request.slave_config.endianness = endianness;
-
- /* slave_address */
- request->parameters.i2c_read_request.slave_config.slave_address_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.slave_config.slave_address));
- request->parameters.i2c_read_request.slave_config.slave_address = BYTE_ORDER__htons(slave_address);
-
- /* register_address_size */
- request->parameters.i2c_read_request.slave_config.register_address_size_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.slave_config.register_address_size));
- request->parameters.i2c_read_request.slave_config.register_address_size = register_address_size;
-
- /* bus_index */
- request->parameters.i2c_read_request.slave_config.bus_index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.slave_config.bus_index));
- request->parameters.i2c_read_request.slave_config.bus_index = bus_index;
-
- /* should_hold_bus */
- request->parameters.i2c_read_request.slave_config.should_hold_bus_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.i2c_read_request.slave_config.should_hold_bus));
- request->parameters.i2c_read_request.slave_config.should_hold_bus = should_hold_bus;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
/* Packs a STOP_POWER_MEASUEMENT control request. The opcode carries no
 * parameters, so it delegates to the shared empty-request packer
 * (presumably header-only, matching the explicit no-parameter packers in
 * this file - verify against control_protocol__pack_empty_request). */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_stop_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
{
    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_STOP_POWER_MEASUEMENT);
}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_core_top_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_core_top_type_t config_type, CONTROL_PROTOCOL__config_core_top_params_t *params)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_core_top_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CORE_TOP, 2);
-
- /* config_type */
- request->parameters.config_core_top_request.config_type_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.config_core_top_request.config_type));
- request->parameters.config_core_top_request.config_type = BYTE_ORDER__htonl(config_type);
-
- /* params */
- request->parameters.config_core_top_request.config_params_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.config_core_top_request.config_params));
- (void)memcpy(&request->parameters.config_core_top_request.config_params,
- params,
- sizeof(request->parameters.config_core_top_request.config_params));
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_phy_operation_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__phy_operation_t operation_type)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__phy_operation_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_PHY_OPERATION, 1);
-
- /* operation_type */
- request->parameters.phy_operation_request.operation_type_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.phy_operation_request.operation_type));
- request->parameters.phy_operation_request.operation_type = BYTE_ORDER__htonl((uint32_t)operation_type);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_user_config_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_USER_CONFIG, 2);
-
- /* Address */
- request->parameters.read_user_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_user_config_request.address));
- request->parameters.read_user_config_request.address = BYTE_ORDER__htonl(address);
-
- /* Data count */
- request->parameters.read_user_config_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_user_config_request.data_count));
- request->parameters.read_user_config_request.data_count = BYTE_ORDER__htonl(data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_examine_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_EXAMINE_USER_CONFIG, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_user_config_request_t) + data_length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_USER_CONFIG, 2);
-
- /* Address */
- request->parameters.write_user_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_user_config_request.address));
- request->parameters.write_user_config_request.address = BYTE_ORDER__htonl(address);
-
- /* Data */
- request->parameters.write_user_config_request.data_length = BYTE_ORDER__htonl(data_length);
- memcpy(&(request->parameters.write_user_config_request.data), data, data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_erase_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_ERASE_USER_CONFIG, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE ;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_START_FIRMWARE_UPDATE, 0);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_finish_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE ;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_FINISH_FIRMWARE_UPDATE, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, const uint8_t *data, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_firmware_update_request_t) + data_length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_FIRMWARE_UPDATE, 2);
-
- /* offset */
- request->parameters.write_firmware_update_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_firmware_update_request.offset));
- request->parameters.write_firmware_update_request.offset = BYTE_ORDER__htonl(offset);
-
- /* data */
- request->parameters.write_firmware_update_request.data_length = BYTE_ORDER__htonl(data_length);
- memcpy(&(request->parameters.write_firmware_update_request.data), data, data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, uint8_t *data, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request_t) + data_length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_SECOND_STAGE_TO_INTERNAL_MEMORY, 2);
-
- /* offset */
- request->parameters.write_second_stage_to_internal_memory_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_second_stage_to_internal_memory_request.offset));
- request->parameters.write_second_stage_to_internal_memory_request.offset = BYTE_ORDER__htonl(offset);
-
- /* data */
- request->parameters.write_second_stage_to_internal_memory_request.data_length = BYTE_ORDER__htonl(data_length);
- memcpy(&(request->parameters.write_second_stage_to_internal_memory_request.data), data, data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__copy_second_stage_to_flash_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t second_stage_size)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__copy_second_stage_to_flash_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_COPY_SECOND_STAGE_TO_FLASH, 2);
-
- /* expected md5 */
- request->parameters.copy_second_stage_to_flash_request.expected_md5_length = BYTE_ORDER__htonl(sizeof(request->parameters.copy_second_stage_to_flash_request.expected_md5));
- memcpy(&(request->parameters.copy_second_stage_to_flash_request.expected_md5),
- *expected_md5,
- sizeof(request->parameters.copy_second_stage_to_flash_request.expected_md5));
-
- /* second_stage_size */
- request->parameters.copy_second_stage_to_flash_request.second_stage_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.copy_second_stage_to_flash_request.second_stage_size));
- request->parameters.copy_second_stage_to_flash_request.second_stage_size = BYTE_ORDER__htonl(second_stage_size);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_validate_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t firmware_size)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__validate_firmware_update_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_VALIDATE_FIRMWARE_UPDATE, 2);
-
- /* expected md5 */
- request->parameters.validate_firmware_update_request.expected_md5_length = BYTE_ORDER__htonl(sizeof(request->parameters.validate_firmware_update_request.expected_md5));
- memcpy(&(request->parameters.validate_firmware_update_request.expected_md5),
- *expected_md5,
- sizeof(request->parameters.validate_firmware_update_request.expected_md5));
-
- /* firmware_size */
- request->parameters.validate_firmware_update_request.firmware_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.validate_firmware_update_request.firmware_size));
- request->parameters.validate_firmware_update_request.firmware_size = BYTE_ORDER__htonl(firmware_size);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t latency_measurement_en, uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index, uint32_t outbound_stream_index)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__latency_config_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_NN_CORE_LATENCY_MEASUREMENT_CONFIG, 5);
-
- /* latency_measurement_en */
- request->parameters.latency_config_request.latency_measurement_en_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.latency_config_request.latency_measurement_en));
- request->parameters.latency_config_request.latency_measurement_en = latency_measurement_en;
-
- /* inbound_start_buffer_number */
- request->parameters.latency_config_request.inbound_start_buffer_number_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.latency_config_request.inbound_start_buffer_number));
- request->parameters.latency_config_request.inbound_start_buffer_number = BYTE_ORDER__htonl(inbound_start_buffer_number);
-
- /* outbound_stop_buffer_number */
- request->parameters.latency_config_request.outbound_stop_buffer_number_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.latency_config_request.outbound_stop_buffer_number));
- request->parameters.latency_config_request.outbound_stop_buffer_number = BYTE_ORDER__htonl(outbound_stop_buffer_number);
-
- /* inbound_stream_index */
- request->parameters.latency_config_request.inbound_stream_index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.latency_config_request.inbound_stream_index));
- request->parameters.latency_config_request.inbound_stream_index = BYTE_ORDER__htonl(inbound_stream_index);
-
- /* outbound_stream_index */
- request->parameters.latency_config_request.outbound_stream_index_length = BYTE_ORDER__htonl(
- sizeof(request->parameters.latency_config_request.outbound_stream_index));
- request->parameters.latency_config_request.outbound_stream_index = BYTE_ORDER__htonl(outbound_stream_index);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_NN_CORE_LATENCY_MEASUREMENT_READ, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_store_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t is_first, uint32_t section_index,
- uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size,
- uint8_t *data, uint32_t data_length, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
- uint32_t config_name_length, uint8_t *config_name)
-
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_store_config_request_t) + data_length;
-
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_STORE_CONFIG, 11);
-
- /* section index */
- request->parameters.sensor_store_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.section_index));
- request->parameters.sensor_store_config_request.section_index = BYTE_ORDER__htonl(section_index);
-
- /* is_first */
- request->parameters.sensor_store_config_request.is_first_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.is_first));
- request->parameters.sensor_store_config_request.is_first = BYTE_ORDER__htonl(is_first);
-
- /* start_offset */
- request->parameters.sensor_store_config_request.start_offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.start_offset));
- request->parameters.sensor_store_config_request.start_offset = BYTE_ORDER__htonl(start_offset);
-
- /* reset_data_size */
- request->parameters.sensor_store_config_request.reset_data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.reset_data_size));
- request->parameters.sensor_store_config_request.reset_data_size = BYTE_ORDER__htonl(reset_data_size);
-
- /* sensor_type */
- request->parameters.sensor_store_config_request.sensor_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.sensor_type));
- request->parameters.sensor_store_config_request.sensor_type = BYTE_ORDER__htonl(sensor_type);
-
- /* total_data_size */
- request->parameters.sensor_store_config_request.total_data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.total_data_size));
- request->parameters.sensor_store_config_request.total_data_size = BYTE_ORDER__htonl(total_data_size);
-
- /* config_width */
- request->parameters.sensor_store_config_request.config_width_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_width));
- request->parameters.sensor_store_config_request.config_width = BYTE_ORDER__htons(config_width);
-
- /* config_height */
- request->parameters.sensor_store_config_request.config_height_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_height));
- request->parameters.sensor_store_config_request.config_height = BYTE_ORDER__htons(config_height);
-
- /* config_fps */
- request->parameters.sensor_store_config_request.config_fps_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_fps));
- request->parameters.sensor_store_config_request.config_fps = BYTE_ORDER__htons(config_fps);
-
- /* Config_name */
- if(config_name_length <= MAX_CONFIG_NAME_LEN){
- request->parameters.sensor_store_config_request.config_name_length = BYTE_ORDER__htonl(MAX_CONFIG_NAME_LEN);
- memcpy(&(request->parameters.sensor_store_config_request.config_name), config_name, config_name_length);
- }
- else{
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
- goto exit;
- }
-
- /* Data */
- request->parameters.sensor_store_config_request.data_length = BYTE_ORDER__htonl(data_length);
- memcpy(&(request->parameters.sensor_store_config_request.data), data, data_length);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint32_t section_index, uint32_t offset, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_get_config_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_GET_CONFIG, 3);
-
- /* section_index */
- request->parameters.sensor_get_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.section_index));
- request->parameters.sensor_get_config_request.section_index = BYTE_ORDER__htonl(section_index);
-
- /* offset */
- request->parameters.sensor_get_config_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.offset));
- request->parameters.sensor_get_config_request.offset = BYTE_ORDER__htonl(offset);
-
- /* Data count */
- request->parameters.sensor_get_config_request.data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.data_size));
- request->parameters.sensor_get_config_request.data_size = BYTE_ORDER__htonl(data_length);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-hailo_status CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t sensor_type, uint32_t bus_index)
-{
- size_t local_request_size = 0;
-
- CHECK_ARG_NOT_NULL(request);
- CHECK_ARG_NOT_NULL(request_size);
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_set_i2c_bus_index_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_SET_I2C_BUS_INDEX, 2);
-
- /* section index */
- request->parameters.sensor_set_i2c_bus_index.sensor_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_i2c_bus_index.sensor_type));
- request->parameters.sensor_set_i2c_bus_index.sensor_type = BYTE_ORDER__htonl(sensor_type);
-
- /* bus_index */
- request->parameters.sensor_set_i2c_bus_index.i2c_bus_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_i2c_bus_index.i2c_bus_index));
- request->parameters.sensor_set_i2c_bus_index.i2c_bus_index = BYTE_ORDER__htonl(bus_index);
-
- *request_size = local_request_size;
-
- return HAILO_SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) ) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_load_config_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_LOAD_AND_START, 1);
-
- /* section index */
- request->parameters.sensor_load_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_load_config_request.section_index));
- request->parameters.sensor_load_config_request.section_index = BYTE_ORDER__htonl(section_index);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) ) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_reset_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_RESET, 1);
-
- /* section index */
- request->parameters.sensor_reset_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_reset_request.section_index));
- request->parameters.sensor_reset_request.section_index = BYTE_ORDER__htonl(section_index);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
/* Packs a SENSOR_SET_GENERIC_I2C_SLAVE control request (5 parameters).
 * Wire format per parameter: a 32-bit length field in network byte order followed by the value;
 * multi-byte values are byte-swapped to network order, single-byte values are stored as-is.
 * @param request               Output request buffer to fill (must not be NULL).
 * @param request_size          Receives the total packed size in bytes (must not be NULL).
 * @param sequence              Control sequence number placed in the header.
 * @param slave_address         I2C slave address (sent in network byte order).
 * @param register_address_size Size in bytes of the slave's register addresses.
 * @param bus_index             I2C bus the slave sits on.
 * @param should_hold_bus       Non-zero to hold the bus between transactions.
 * @param endianness            Endianness of the slave's registers.
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint16_t slave_address,
    uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size) ) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_set_generic_i2c_slave_request_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_SET_GENERIC_I2C_SLAVE, 5);

    /* slave_address (16-bit, network byte order) */
    request->parameters.sensor_set_generic_i2c_slave_request.slave_address_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.slave_address));
    request->parameters.sensor_set_generic_i2c_slave_request.slave_address = BYTE_ORDER__htons(slave_address);

    /* register_address_size (single byte, no swap needed) */
    request->parameters.sensor_set_generic_i2c_slave_request.register_address_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.register_address_size));
    request->parameters.sensor_set_generic_i2c_slave_request.register_address_size = register_address_size;

    /* bus index (single byte) */
    request->parameters.sensor_set_generic_i2c_slave_request.bus_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.bus_index));
    request->parameters.sensor_set_generic_i2c_slave_request.bus_index = bus_index;

    /* should_hold_bus (single byte) */
    request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus));
    request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus = should_hold_bus;

    /* endianness (single byte) */
    request->parameters.sensor_set_generic_i2c_slave_request.endianness_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.endianness));
    request->parameters.sensor_set_generic_i2c_slave_request.endianness = endianness;

    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_SENSOR_GET_SECTIONS_INFO);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- const CONTROL_PROTOCOL__application_header_t *network_group_header)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == network_group_header)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__context_switch_set_network_group_header_request_t);
- control_protocol__pack_request_header(request, sequence,
- HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_SET_NETWORK_GROUP_HEADER, 1);
-
- /* application_header */
- request->parameters.context_switch_set_network_group_header_request.application_header_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_network_group_header_request.application_header));
- memcpy(&(request->parameters.context_switch_set_network_group_header_request.application_header),
- network_group_header,
- sizeof(request->parameters.context_switch_set_network_group_header_request.application_header));
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- const CONTROL_PROTOCOL__context_switch_context_info_single_control_t *context_info)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size) || (NULL == context_info)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__context_switch_set_context_info_request_t) + context_info->context_network_data_length;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_SET_CONTEXT_INFO, 5);
-
- /* is_first_control_per_context */
- request->parameters.context_switch_set_context_info_request.is_first_control_per_context_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.is_first_control_per_context));
- request->parameters.context_switch_set_context_info_request.is_first_control_per_context =
- context_info->is_first_control_per_context;
-
- /* is_last_control_per_context */
- request->parameters.context_switch_set_context_info_request.is_last_control_per_context_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.is_last_control_per_context));
- request->parameters.context_switch_set_context_info_request.is_last_control_per_context =
- context_info->is_last_control_per_context;
-
- /* context_type */
- request->parameters.context_switch_set_context_info_request.context_type_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.context_type));
- request->parameters.context_switch_set_context_info_request.context_type =
- context_info->context_type;
-
- /* actions_count */
- request->parameters.context_switch_set_context_info_request.actions_count_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.actions_count));
- request->parameters.context_switch_set_context_info_request.actions_count =
- BYTE_ORDER__htonl(context_info->actions_count);
-
- /* Network data (edge layers + Trigger groups) */
- if (CONTROL_PROTOCOL__CONTEXT_NETWORK_DATA_SINGLE_CONTROL_MAX_SIZE < context_info->context_network_data_length) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_BUFFER_SIZE;
- goto exit;
- }
- request->parameters.context_switch_set_context_info_request.context_network_data_length =
- BYTE_ORDER__htonl(context_info->context_network_data_length);
- memcpy(&(request->parameters.context_switch_set_context_info_request.context_network_data),
- &(context_info->context_network_data), context_info->context_network_data_length);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_IDLE_TIME_GET_MEASUREMENT);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t measurement_enable)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__idle_time_set_measurement_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_IDLE_TIME_SET_MEASUREMENT, 1);
-
- /*measurement duration*/
- request->parameters.idle_time_set_measurement_request.measurement_enable_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.idle_time_set_measurement_request.measurement_enable));
- request->parameters.idle_time_set_measurement_request.measurement_enable = measurement_enable;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
/* Packs a SET_PAUSE_FRAMES request that enables/disables RX Ethernet pause frames.
 * @param request                Output request buffer (must not be NULL).
 * @param request_size           Receives the total packed size in bytes (must not be NULL).
 * @param sequence               Control sequence number placed in the header.
 * @param rx_pause_frames_enable Non-zero to enable RX pause frames.
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_pause_frames_request(CONTROL_PROTOCOL__request_t *request,
    size_t *request_size, uint32_t sequence, uint8_t rx_pause_frames_enable)
{

    CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
    CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);

    /* Header */
    size_t local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_pause_frames_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_PAUSE_FRAMES, 1);

    /* rx_pause_frames_enable flag (single byte, length-prefixed) */
    request->parameters.set_pause_frames_request.rx_pause_frames_enable_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.set_pause_frames_request.rx_pause_frames_enable));
    request->parameters.set_pause_frames_request.rx_pause_frames_enable = rx_pause_frames_enable;

    *request_size = local_request_size;

    return HAILO_COMMON_STATUS__SUCCESS;
}
-
/* Packs a DOWNLOAD_CONTEXT_ACTION_LIST request (4 parameters) used to read back a chunk of a
 * context's action list from the device, starting at action_list_offset.
 * @param request            Output request buffer (must not be NULL).
 * @param request_size       Receives the total packed size in bytes (must not be NULL).
 * @param sequence           Control sequence number placed in the header.
 * @param network_group_id   Network group whose action list is requested (network byte order on the wire).
 * @param context_type       Context category (sent as a single byte).
 * @param context_index      Index of the context within the network group.
 * @param action_list_offset Offset into the action list (16-bit, network byte order).
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_download_context_action_list_request(CONTROL_PROTOCOL__request_t *request,
    size_t *request_size, uint32_t sequence, uint32_t network_group_id,
    CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size)) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__download_context_action_list_request_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_DOWNLOAD_CONTEXT_ACTION_LIST, 4);

    /* network_group_id (32-bit, network byte order) */
    request->parameters.download_context_action_list_request.network_group_id_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.network_group_id));
    request->parameters.download_context_action_list_request.network_group_id = BYTE_ORDER__htonl(network_group_id);

    /* context_type (enum narrowed to a single byte) */
    request->parameters.download_context_action_list_request.context_type_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.context_type));
    request->parameters.download_context_action_list_request.context_type = static_cast<uint8_t>(context_type);

    /* context_index (single byte) */
    request->parameters.download_context_action_list_request.context_index_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.context_index));
    request->parameters.download_context_action_list_request.context_index = context_index;

    /* action_list_offset (16-bit, network byte order) */
    request->parameters.download_context_action_list_request.action_list_offset_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.action_list_offset));
    request->parameters.download_context_action_list_request.action_list_offset = BYTE_ORDER__htons(action_list_offset);

    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
/* Packs a CHANGE_CONTEXT_SWITCH_STATUS request (4 parameters) that drives the device's
 * context-switch state machine.
 * @param request                     Output request buffer (must not be NULL).
 * @param request_size                Receives the total packed size in bytes (must not be NULL).
 * @param sequence                    Control sequence number placed in the header.
 * @param state_machine_status        Target state-machine status (copied verbatim).
 * @param application_index           Application (network group) the change applies to.
 * @param dynamic_batch_size          Batch size to apply.
 * @param keep_nn_config_during_reset Whether NN config is preserved across reset.
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
    CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
    uint16_t dynamic_batch_size, bool keep_nn_config_during_reset)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size)) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
        sizeof(CONTROL_PROTOCOL__change_context_switch_status_request_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CHANGE_CONTEXT_SWITCH_STATUS, 4);

    /* state_machine_status (copied as raw bytes of the enum) */
    request->parameters.change_context_switch_status_request.state_machine_status_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.state_machine_status));
    memcpy(&(request->parameters.change_context_switch_status_request.state_machine_status),
        &(state_machine_status),
        sizeof(request->parameters.change_context_switch_status_request.state_machine_status));

    /* application_index (single byte) */
    request->parameters.change_context_switch_status_request.application_index_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.application_index));
    request->parameters.change_context_switch_status_request.application_index = application_index;

    /* dynamic_batch_size
     * NOTE(review): this 16-bit value is stored without BYTE_ORDER__htons, unlike other 16-bit
     * fields in this file (e.g. action_list_offset, host_port). Confirm the firmware expects
     * host byte order here before "fixing" it. */
    request->parameters.change_context_switch_status_request.dynamic_batch_size_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.dynamic_batch_size));
    request->parameters.change_context_switch_status_request.dynamic_batch_size = dynamic_batch_size;

    /* keep_nn_config_during_reset (single byte; comment previously mislabeled "dynamic_batch_size") */
    request->parameters.change_context_switch_status_request.keep_nn_config_during_reset_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.keep_nn_config_during_reset));
    request->parameters.change_context_switch_status_request.keep_nn_config_during_reset = keep_nn_config_during_reset;

    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_enable(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t cpu_id,
- bool should_enable)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
- CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- if (CPU_ID_CORE_CPU < cpu_id){
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
- goto exit;
- }
-
- opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_WD_ENABLE : HAILO_CONTROL_OPCODE_APP_WD_ENABLE;
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__wd_enable_request_t);
- control_protocol__pack_request_header(request, sequence, opcode, 1);
-
- request->parameters.wd_enable_request.should_enable_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.wd_enable_request.should_enable));
- request->parameters.wd_enable_request.should_enable = should_enable;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_config(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t cpu_id,
- uint32_t wd_cycles,
- CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
- CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
- if (CPU_ID_CORE_CPU < cpu_id){
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
- goto exit;
- }
-
- opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_WD_CONFIG : HAILO_CONTROL_OPCODE_APP_WD_CONFIG;
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__wd_config_request_t);
- control_protocol__pack_request_header(request, sequence, opcode, 2);
-
- request->parameters.wd_config_request.wd_cycles_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.wd_config_request.wd_cycles));
- request->parameters.wd_config_request.wd_cycles = BYTE_ORDER__htonl(wd_cycles);
- request->parameters.wd_config_request.wd_mode_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.wd_config_request.wd_mode));
- request->parameters.wd_config_request.wd_mode = static_cast<uint8_t>(wd_mode);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_empty_request(request, request_size, sequence,
- HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_CLEAR_CONFIGURED_APPS);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_previous_system_state(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t cpu_id)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
- CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
- if (CPU_ID_CORE_CPU < cpu_id){
- status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
- goto exit;
- }
-
- opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_PREVIOUS_SYSTEM_STATE : HAILO_CONTROL_OPCODE_APP_PREVIOUS_SYSTEM_STATE;
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- *request_size = local_request_size;
- control_protocol__pack_empty_request(request, request_size, sequence, opcode);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t interrupt_type, uint8_t interrupt_index, uint8_t interrupt_sub_index)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__set_dataflow_interrupt_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_DATAFLOW_INTERRUPT, 3);
-
- /* Interrupt_type */
- request->parameters.set_dataflow_interrupt_request.interrupt_type_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_type));
- memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_type),
- &(interrupt_type),
- sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_type));
-
- /* Interrupt_index */
- request->parameters.set_dataflow_interrupt_request.interrupt_index_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_index));
- memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_index),
- &(interrupt_index),
- sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_index));
-
- /* Interrupt_sub_index */
- request->parameters.set_dataflow_interrupt_request.interrupt_sub_index_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index));
- memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index),
- &(interrupt_sub_index),
- sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index));
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t connection_type, uint16_t host_port, uint32_t host_ip_address)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__d2h_event_manager_set_new_host_info_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_D2H_EVENT_MANAGER_SET_HOST_INFO, 3);
-
- /* connection_type */
- request->parameters.d2h_event_manager_set_new_host_info_request.connection_type_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.connection_type));
- request->parameters.d2h_event_manager_set_new_host_info_request.connection_type = connection_type;
-
-
- /* remote_port */
- request->parameters.d2h_event_manager_set_new_host_info_request.host_port_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.host_port));
- request->parameters.d2h_event_manager_set_new_host_info_request.host_port = BYTE_ORDER__htons(host_port);
-
-
- /* remote_ip_address */
- request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address));
- request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address = BYTE_ORDER__htonl(host_ip_address);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t event_priority)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__d2h_event_manager_send_host_info_event_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_D2H_EVENT_MANAGER_SEND_EVENT_HOST_INFO, 1);
-
- /* event_priority */
- request->parameters.d2h_event_manager_send_host_info_event_request.priority_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_send_host_info_event_request.priority));
- request->parameters.d2h_event_manager_send_host_info_event_request.priority = event_priority;
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_chip_temperature_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_CHIP_TEMPERATURE);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_board_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_board_config_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_BOARD_CONFIG, 2);
-
- /* Address */
- request->parameters.read_board_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_board_config_request.address));
- request->parameters.read_board_config_request.address = BYTE_ORDER__htonl(address);
-
- /* Data count */
- request->parameters.read_board_config_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_board_config_request.data_count));
- request->parameters.read_board_config_request.data_count = BYTE_ORDER__htonl(data_length);
-
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
/* Packs a WRITE_BOARD_CONFIG request carrying data_length bytes to write at the given address.
 * The payload is appended after the fixed request fields, so the packed size is
 * base + sizeof(struct) + data_length.
 * NOTE(review): data_length is not validated before the memcpy below, unlike the similar
 * variable-length path in pack_context_switch_set_context_info_request which bounds its payload
 * by a MAX constant. Presumably callers guarantee data_length fits the request buffer — verify,
 * since an oversized value would overflow request->parameters.
 * @param request      Output request buffer (must not be NULL).
 * @param request_size Receives the total packed size in bytes (must not be NULL).
 * @param sequence     Control sequence number placed in the header.
 * @param address      Board-config address to write to (network byte order on the wire).
 * @param data         Payload bytes to write (must not be NULL).
 * @param data_length  Number of payload bytes.
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_board_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
    uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_board_config_request_t) + data_length;
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_BOARD_CONFIG, 2);

    /* Address (32-bit, network byte order) */
    request->parameters.write_board_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_board_config_request.address));
    request->parameters.write_board_config_request.address = BYTE_ORDER__htonl(address);

    /* Data (variable length; length field in network byte order) */
    request->parameters.write_board_config_request.data_length = BYTE_ORDER__htonl(data_length);

    memcpy(&(request->parameters.write_board_config_request.data), data, data_length);


    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_enable_debugging_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_rma)
-{
- /* Header */
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_ENABLE_DEBUGGING, 1);
-
- /* is_rma */
- request->parameters.enable_debugging_request.is_rma_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.enable_debugging_request.is_rma));
- request->parameters.enable_debugging_request.is_rma = is_rma;
-
- *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__enable_debugging_request_t);
-
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_extended_device_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_DEVICE_INFORMATION);
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_health_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_HEALTH_INFORMATION);
-}
-
-
/* Packs a CONFIG_CONTEXT_SWITCH_BREAKPOINT request (3 parameters): breakpoint id, control
 * action, and the breakpoint descriptor copied verbatim.
 * @param request            Output request buffer (must not be NULL).
 * @param request_size       Receives the total packed size in bytes (must not be NULL).
 * @param sequence           Control sequence number placed in the header.
 * @param breakpoint_id      Identifier of the breakpoint slot to configure.
 * @param breakpoint_control Action to apply (narrowed to a single byte).
 * @param breakpoint_data    Breakpoint descriptor to embed (must not be NULL).
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
    uint8_t breakpoint_id,
    CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
    CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size) || (NULL == breakpoint_data)) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
        sizeof(CONTROL_PROTOCOL__config_context_switch_breakpoint_request_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CONTEXT_SWITCH_BREAKPOINT, 3);

    /* breakpoint id (single byte) */
    request->parameters.config_context_switch_breakpoint_request.breakpoint_id_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_id));
    request->parameters.config_context_switch_breakpoint_request.breakpoint_id = breakpoint_id;

    /* breakpoint control action (enum narrowed to a single byte) */
    request->parameters.config_context_switch_breakpoint_request.breakpoint_control_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_control));
    request->parameters.config_context_switch_breakpoint_request.breakpoint_control = (uint8_t)breakpoint_control;

    /* breakpoint data (fixed-size struct, copied verbatim) */
    request->parameters.config_context_switch_breakpoint_request.breakpoint_data_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_data));
    memcpy(&(request->parameters.config_context_switch_breakpoint_request.breakpoint_data),
        breakpoint_data,
        sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_data));

    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
/* Packs a GET_CONTEXT_SWITCH_BREAKPOINT_STATUS request (1 parameter: breakpoint id).
 * NOTE(review): the parameter is written through parameters.config_context_switch_breakpoint_request
 * rather than a dedicated get-status member; presumably the two union members share the
 * breakpoint_id layout at offset 0 — confirm against the request-struct definitions.
 * @param request       Output request buffer (must not be NULL).
 * @param request_size  Receives the total packed size in bytes (must not be NULL).
 * @param sequence      Control sequence number placed in the header.
 * @param breakpoint_id Identifier of the breakpoint slot to query.
 * @return HAILO_COMMON_STATUS__SUCCESS on success, NULL_ARGUMENT_PASSED on bad pointers. */
HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
    uint8_t breakpoint_id)
{
    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
    size_t local_request_size = 0;

    if ((NULL == request) || (NULL == request_size)) {
        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
        goto exit;
    }

    /* Header */
    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
        sizeof(CONTROL_PROTOCOL__get_context_switch_breakpoint_status_request_t);
    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_CONTEXT_SWITCH_BREAKPOINT_STATUS, 1);

    /* breakpoint id (single byte, length-prefixed) */
    request->parameters.config_context_switch_breakpoint_request.breakpoint_id_length =
        BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_id));
    request->parameters.config_context_switch_breakpoint_request.breakpoint_id = breakpoint_id;

    *request_size = local_request_size;
    status = HAILO_COMMON_STATUS__SUCCESS;
exit:
    return status;
}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_CONTEXT_SWITCH_MAIN_HEADER, 0);
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint16_t batch_index, bool enable_user_configuration)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__config_context_switch_timestamp_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CONTEXT_SWITCH_TIMESTAMP, 2);
-
- /* batch index */
- request->parameters.config_context_switch_timestamp_request.batch_index_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_timestamp_request.batch_index));
- request->parameters.config_context_switch_timestamp_request.batch_index = BYTE_ORDER__htons(batch_index);
-
- /* enable_user_configuration */
- request->parameters.config_context_switch_timestamp_request.enable_user_configuration_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_timestamp_request.enable_user_configuration));
- request->parameters.config_context_switch_timestamp_request.enable_user_configuration = enable_user_configuration;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_run_bist_test_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_top_test,
- uint32_t top_bypass_bitmap, uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1)
-{
- size_t local_request_size = 0;
-
- CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
- CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__run_bist_test_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_RUN_BIST_TEST, 5);
-
- /* running on top */
- request->parameters.run_bist_test_request.is_top_test_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.is_top_test));
- request->parameters.run_bist_test_request.is_top_test = is_top_test;
-
- /* top bypass */
- request->parameters.run_bist_test_request.top_bypass_bitmap_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.top_bypass_bitmap));
- request->parameters.run_bist_test_request.top_bypass_bitmap = BYTE_ORDER__htonl(top_bypass_bitmap);
-
- /* cluster index */
- request->parameters.run_bist_test_request.cluster_index_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_index));
- request->parameters.run_bist_test_request.cluster_index = cluster_index;
-
- /* cluster bypass 0 */
- request->parameters.run_bist_test_request.cluster_bypass_bitmap_0_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_bypass_bitmap_0));
- request->parameters.run_bist_test_request.cluster_bypass_bitmap_0 = BYTE_ORDER__htonl(cluster_bypass_bitmap_0);
-
- /* cluster bypass 1 */
- request->parameters.run_bist_test_request.cluster_bypass_bitmap_1_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_bypass_bitmap_1));
- request->parameters.run_bist_test_request.cluster_bypass_bitmap_1 = BYTE_ORDER__htonl(cluster_bypass_bitmap_1);
-
- *request_size = local_request_size;
-
- return HAILO_COMMON_STATUS__SUCCESS;
-}
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_sleep_state_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t sleep_state)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
- size_t local_request_size = 0;
-
- if ((NULL == request) || (NULL == request_size)) {
- status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
- goto exit;
- }
-
- /* Header */
- local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
- sizeof(CONTROL_PROTOCOL__set_sleep_state_request_t);
- control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, 1);
-
- /* sleep_state */
- request->parameters.set_sleep_state_request.sleep_state_length =
- BYTE_ORDER__htonl(sizeof(request->parameters.set_sleep_state_request.sleep_state));
- request->parameters.set_sleep_state_request.sleep_state = sleep_state;
-
- *request_size = local_request_size;
- status = HAILO_COMMON_STATUS__SUCCESS;
-exit:
- return status;
-}
-
-#endif /* FIRMWARE_ARCH */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file control_protocol.hpp
- * @brief Contains Defines and declarations related to control protocl
- **/
-
-#ifndef _CONTROL_PROTOCOL_HPP_
-#define _CONTROL_PROTOCOL_HPP_
-
-#include "control_protocol.h"
-#include "firmware_status.h"
-#include "hailo/hailort.h"
-#include <stdint.h>
-
-typedef enum {
- HAILO8_CLOCK_RATE = 400 * 1000 * 1000,
- HAILO8R_CLOCK_RATE = 200 * 1000 * 1000
-} CONTROL_PROTOCOL__HAILO8_CLOCK_RATE_t;
-
-typedef struct {
- uint8_t stream_index;
- uint8_t is_input;
- uint32_t communication_type;
- uint8_t skip_nn_stream_config;
- uint8_t power_mode; // CONTROL_PROTOCOL__power_mode_t
- CONTROL_PROTOCOL__nn_stream_config_t nn_stream_config;
- CONTROL_PROTOCOL__communication_config_prams_t communication_params;
-} CONTROL_PROTOCOL__config_stream_params_t;
-
-static_assert(sizeof(CONTROL_PROTOCOL__context_switch_context_index_t) <= UINT8_MAX,
- "CONTROL_PROTOCOL__context_switch_context_index_t must fit in uint8_t");
-
-/* End of context switch structs */
-
-const char *CONTROL_PROTOCOL__get_textual_opcode(CONTROL_PROTOCOL__OPCODE_t opcode);
-
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__parse_response(uint8_t *message,
- uint32_t message_size,
- CONTROL_PROTOCOL__response_header_t **header,
- CONTROL_PROTOCOL__payload_t **payload,
- CONTROL_PROTOCOL__status_t *fw_status);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__get_sequence_from_response_buffer(uint8_t *response_buffer,
- size_t response_buffer_size, uint32_t *sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_core_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_fw_logger_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, hailo_fw_logger_level_t level, uint8_t interface_mask);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_open_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_close_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__reset_type_t reset_type);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, bool should_clear);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t delay_milliseconds, CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_stop_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_finish_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, const uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_validate_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t firmware_size);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_examine_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_erase_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_phy_operation_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__phy_operation_t operation_type);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_core_top_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_core_top_type_t config_type, CONTROL_PROTOCOL__config_core_top_params_t *params);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_write_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
- uint32_t sequence, uint32_t offset, uint8_t endianness,
- uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, const uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
- uint32_t sequence, uint32_t offset, uint8_t endianness,
- uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint32_t data_length, bool should_hold_bus);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t latency_measurement_en, uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index, uint32_t outbound_stream_index);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_store_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t is_first, uint32_t section_index,
- uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size, uint8_t *data, uint32_t data_length,
- uint16_t config_height, uint16_t config_width, uint16_t config_fps, uint32_t config_name_length, uint8_t *config_name);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint32_t section_index, uint32_t offset, uint32_t data_length);
-hailo_status CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t sensor_type, uint32_t bus_index);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint16_t slave_address,
- uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- const CONTROL_PROTOCOL__application_header_t *network_group_header);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- const CONTROL_PROTOCOL__context_switch_context_info_single_control_t *context_info);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t measurement_enable);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_download_context_action_list_request(CONTROL_PROTOCOL__request_t *request,
- size_t *request_size, uint32_t sequence, uint32_t network_group_id,
- CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
- uint16_t dynamic_batch_size, bool keep_nn_config_during_reset);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_enable(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t cpu_id,
- bool should_enable);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_config(
- CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t cpu_id,
- uint32_t wd_cycles,
- CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_previous_system_state(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t cpu_id);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t interrupt_type, uint8_t interrupt_index, uint8_t interrupt_sub_index);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request( CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t connection_type, uint16_t host_port, uint32_t host_ip_address);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request( CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t event_priority);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_chip_temperature_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_board_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_board_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_enable_debugging_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_rma);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_extended_device_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t breakpoint_id,
- CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
- CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint8_t breakpoint_id);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint32_t offset, uint8_t *data, uint32_t data_length);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__copy_second_stage_to_flash_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- MD5_SUM_t *expected_md5, uint32_t second_stage_size);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_pause_frames_request(CONTROL_PROTOCOL__request_t *request,
- size_t *request_size,
- uint32_t sequence,
- uint8_t rx_pause_frames_enable);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint16_t batch_index, bool enable_user_configuration);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_run_bist_test_request(
- CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_top_test,
- uint32_t top_bypass_bitmap, uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_clock_freq_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
- uint32_t clock_freq);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_health_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool should_activate);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool should_activate);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_hw_consts_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
-HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_sleep_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t sleep_state);
-
-#endif /* _CONTROL_PROTOCOL_HPP_ */
\ No newline at end of file
+++ /dev/null
-#include "core_device.hpp"
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "common/logger_macros.hpp"
-#include "context_switch/multi_context/vdma_config_manager.hpp"
-#include "md5.h"
-
-#include <memory>
-
-static const std::string CORE_DRIVER_PATH = "/dev/hailo_core";
-
-namespace hailort
-{
-
-bool CoreDevice::is_loaded()
-{
-#if defined(_MSC_VER)
- // windows is not supported for core driver
- return false;
-#else
- return (access(CORE_DRIVER_PATH.c_str(), F_OK) == 0);
-#endif // defined(_MSC_VER)
-}
-
-Expected<std::unique_ptr<CoreDevice>> CoreDevice::create()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- auto driver = HailoRTDriver::create(CORE_DRIVER_PATH);
- CHECK_EXPECTED(driver, "Failed to initialize HailoRTDriver");
-
- auto device = std::unique_ptr<CoreDevice>(new (std::nothrow) CoreDevice(driver.release(), status, DEVICE_ID));
- CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating CoreDevice");
-
- return device;
-}
-
-
-CoreDevice::CoreDevice(HailoRTDriver &&driver, hailo_status &status, const std::string &device_id) :
- VdmaDevice::VdmaDevice(std::move(driver), Device::Type::CORE, device_id)
-{
- status = update_fw_state();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("update_fw_state() failed with status {}", status);
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-Expected<hailo_device_architecture_t> CoreDevice::get_architecture() const {
- return Expected<hailo_device_architecture_t>(m_device_architecture);
-}
-
-hailo_status CoreDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
-{
- if (CONTROL_PROTOCOL__RESET_TYPE__NN_CORE == reset_type) {
- return m_driver.reset_nn_core();
- }
-
- LOGGER__ERROR("Can't reset CoreDevice, please use linux reboot");
- return HAILO_NOT_IMPLEMENTED;
-}
-
-Expected<size_t> CoreDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
-{
- if (hailo_cpu_id_t::HAILO_CPU_ID_0 == cpu_id) {
- LOGGER__ERROR("Read FW log is supported only on core CPU");
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
-
- return VdmaDevice::read_log(buffer, cpu_id);
-}
-
-} /* namespace hailort */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file core_device
- * @brief Device used by Hailo-15
- *
- **/
-
-#ifndef _HAILO_CORE_DEVICE_HPP_
-#define _HAILO_CORE_DEVICE_HPP_
-
-#include "hailo/expected.hpp"
-#include "hailo/hailort.h"
-#include "vdma_device.hpp"
-
-#include <memory>
-
-namespace hailort
-{
-
-class CoreDevice : public VdmaDevice {
-public:
- virtual ~CoreDevice() = default;
- static bool is_loaded();
- static Expected<std::unique_ptr<CoreDevice>> create();
-
- virtual Expected<hailo_device_architecture_t> get_architecture() const override;
- virtual const char* get_dev_id() const override {return DEVICE_ID;}
- Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id);
-
- virtual bool is_stream_interface_supported(const hailo_stream_interface_t &stream_interface) const override
- {
- switch (stream_interface) {
- case HAILO_STREAM_INTERFACE_CORE:
- return true;
- case HAILO_STREAM_INTERFACE_PCIE:
- case HAILO_STREAM_INTERFACE_ETH:
- case HAILO_STREAM_INTERFACE_MIPI:
- return false;
- default:
- LOGGER__ERROR("Invalid stream interface");
- return false;
- }
- }
-
- static constexpr const char *DEVICE_ID = "[core]";
-
-protected:
- virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
-
-private:
- CoreDevice(HailoRTDriver &&driver, hailo_status &status, const std::string &device_id);
-};
-
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CORE_DEVICE_HPP_ */
\ No newline at end of file
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/core_op.cpp
+
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/resource_manager.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/resource_manager_builder.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/config_buffer.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/inter_context_buffer.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/ddr_channels_pair.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/channel_allocator.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/resource_manager/context_switch_buffer_builder.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} ${HAILORT_OPS_CPP_SOURCES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file active_core_op_holder.hpp
+ * @brief place_holder stored in ConfigManager indicating which CoreOp is currently active
+ *
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_ACTIVE_CORE_OP_HOLDER_HPP_
+#define _HAILO_CONTEXT_SWITCH_ACTIVE_CORE_OP_HOLDER_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/utils.hpp"
+
+#include "core_op/core_op.hpp"
+
+
+namespace hailort
+{
+
+class CoreOp;
+
+class ActiveCoreOpHolder final
+{
+ public:
+ ActiveCoreOpHolder() : m_core_op(nullptr) {}
+
+ ExpectedRef<CoreOp> get()
+ {
+ CHECK_NOT_NULL_AS_EXPECTED(m_core_op, HAILO_INVALID_OPERATION);
+ return std::ref(*m_core_op);
+ }
+ void set(CoreOp &core_op)
+ {
+ assert(!is_any_active());
+ m_core_op = &core_op;
+ }
+
+ bool is_any_active() { return nullptr != m_core_op; }
+
+ void clear() { m_core_op = nullptr; }
+
+ ActiveCoreOpHolder(ActiveCoreOpHolder&) = delete;
+ ActiveCoreOpHolder& operator=(ActiveCoreOpHolder&) = delete;
+ ActiveCoreOpHolder& operator=(ActiveCoreOpHolder&&) = delete;
+ ActiveCoreOpHolder(ActiveCoreOpHolder&&) = default;
+ private:
+ CoreOp *m_core_op;
+};
+
+} /* namespace hailort */
+
+#endif //_HAILO_CONTEXT_SWITCH_ACTIVE_CORE_OP_HOLDER_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file core_op.cpp
+ * @brief Core-Op module implementation
+ **/
+
+#include "hailo/network_group.hpp"
+#include "hailo/transform.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/utils.hpp"
+#include "common/runtime_statistics_internal.hpp"
+
+#include "core_op/core_op.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+#include "hef/hef_internal.hpp"
+#include "eth/eth_stream.hpp"
+#include "vdma/vdma_stream_base.hpp"
+#include "mipi/mipi_stream.hpp"
+#include "device_common/control_protocol.hpp"
+
+
+namespace hailort
+{
+
+ActivatedCoreOp::ActivatedCoreOp(const hailo_activate_network_group_params_t &network_group_params,
+ std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+ std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+ EventPtr &&core_op_activated_event, hailo_status &status) :
+ m_network_group_params(network_group_params),
+ m_core_op_activated_event(std::move(core_op_activated_event)),
+ m_input_streams(input_streams),
+ m_output_streams(output_streams)
+{
+ status = validate_network_group_params(network_group_params);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to validate network_group params");
+ return;
+ }
+}
+
+uint32_t ActivatedCoreOp::get_invalid_frames_count()
+{
+ uint32_t total_invalid_frames_count = 0;
+ for (auto& name_stream_pair : m_output_streams) {
+ total_invalid_frames_count += name_stream_pair.second->get_invalid_frames_count();
+ }
+ return total_invalid_frames_count;
+}
+
+// TODO: Implement function (HRT-3174)
+hailo_status ActivatedCoreOp::validate_network_group_params(
+ const hailo_activate_network_group_params_t &/*network_group_params*/)
+{
+ return HAILO_SUCCESS;
+}
+
+CoreOp::CoreOp(
+ const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status) :
+ m_config_params(config_params),
+ m_min_configured_batch_size(get_smallest_configured_batch_size(config_params)),
+ m_activation_time_accumulator(),
+ m_deactivation_time_accumulator(),
+ m_metadata(metadata)
+{
+ auto event = Event::create_shared(Event::State::not_signalled);
+ if (nullptr == event) {
+ LOGGER__ERROR("Failed to create activation event");
+ status = HAILO_INTERNAL_FAILURE;
+ return;
+ }
+ m_core_op_activated_event = std::move(std::move(event));
+
+ m_activation_time_accumulator = make_shared_nothrow<FullAccumulator<double>>("activation_time");
+ if (nullptr == m_activation_time_accumulator) {
+ LOGGER__ERROR("Failed to create activation time accumulator");
+ status = HAILO_OUT_OF_HOST_MEMORY;
+ return;
+ };
+
+ m_deactivation_time_accumulator = make_shared_nothrow<FullAccumulator<double>>("deactivation_time");
+ if (nullptr == m_deactivation_time_accumulator) {
+ LOGGER__ERROR("Failed to create deactivation time accumulator");
+ status = HAILO_OUT_OF_HOST_MEMORY;
+ return;
+ };
+
+ status = HAILO_SUCCESS;
+}
+
+Expected<std::unique_ptr<ActivatedNetworkGroup>> CoreOp::activate(const hailo_activate_network_group_params_t &network_group_params)
+{
+ static const auto RESET_PENDING_STREAM_TRANSFERS = false;
+ return create_activated_network_group(network_group_params, CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
+ RESET_PENDING_STREAM_TRANSFERS);
+}
+
+Expected<std::chrono::nanoseconds> get_latency(LatencyMeterPtr &latency_meter, bool clear)
+{
+ auto hw_latency = latency_meter->get_latency(clear);
+ if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
+ return make_unexpected(HAILO_NOT_AVAILABLE);
+ }
+ CHECK_EXPECTED(hw_latency, "Failed getting latency");
+ return hw_latency.release();
+}
+
+/* Network group base functions */
+/**
+ * Returns the average HW latency measurement.
+ * @param network_name  When empty, averages over all networks that have a measurement
+ *                      available; otherwise returns the measurement of that network only.
+ * @return HAILO_NOT_AVAILABLE when no measurement is ready, HAILO_NOT_FOUND for an
+ *         unknown network name.
+ */
+Expected<LatencyMeasurementResult> CoreOp::get_latency_measurement(const std::string &network_name)
+{
+    // Clearing after read is controlled by the configured latency flags.
+    bool clear = ((m_config_params.latency & HAILO_LATENCY_CLEAR_AFTER_GET) == HAILO_LATENCY_CLEAR_AFTER_GET);
+    LatencyMeasurementResult result = {};
+
+    auto latency_meters_exp = get_latency_meters();
+    CHECK_EXPECTED(latency_meters_exp);
+    auto latency_meters = latency_meters_exp.release();
+
+    if (network_name.empty()) {
+        // Average across all networks; meters with no available measurement are skipped.
+        std::chrono::nanoseconds latency_sum(0);
+        uint32_t measurements_count = 0;
+        for (auto &latency_meter_pair : *latency_meters.get()) {
+            auto hw_latency = get_latency(latency_meter_pair.second, clear);
+            if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
+                continue;
+            }
+            CHECK_EXPECTED(hw_latency);
+            latency_sum += hw_latency.value();
+            measurements_count++;
+        }
+        if (0 == measurements_count) {
+            LOGGER__DEBUG("No latency measurements was found");
+            return make_unexpected(HAILO_NOT_AVAILABLE);
+        }
+        result.avg_hw_latency = latency_sum / measurements_count;
+    } else {
+        if(!contains(*latency_meters, network_name)) {
+            LOGGER__DEBUG("No latency measurements was found for network {}", network_name);
+            return make_unexpected(HAILO_NOT_FOUND);
+        }
+        auto hw_latency = get_latency(latency_meters->at(network_name), clear);
+        if (HAILO_NOT_AVAILABLE == hw_latency.status()) {
+            return make_unexpected(HAILO_NOT_AVAILABLE);
+        }
+        CHECK_EXPECTED(hw_latency);
+        result.avg_hw_latency = hw_latency.value();
+    }
+    return result;
+}
+
+/**
+ * Maps output vstream params to their underlying low-level output streams.
+ * Non-mux streams are paired with their params directly; mux streams are collected
+ * and resolved as whole demux groups via add_mux_streams_by_edges_names().
+ */
+Expected<OutputStreamWithParamsVector> CoreOp::get_output_streams_from_vstream_names(
+    const std::map<std::string, hailo_vstream_params_t> &outputs_params)
+{
+    OutputStreamWithParamsVector results;
+    std::unordered_map<std::string, hailo_vstream_params_t> outputs_edges_params;
+    for (auto &name_params_pair : outputs_params) {
+        auto stream_names = m_metadata->get_stream_names_from_vstream_name(name_params_pair.first);
+        CHECK_EXPECTED(stream_names);
+
+        for (auto &stream_name : stream_names.value()) {
+            CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND);
+            auto output_stream = m_output_streams.at(stream_name);
+            if (output_stream->get_info().is_mux) {
+                // Mux edges are handled together below, once all of them are known.
+                outputs_edges_params.emplace(name_params_pair);
+            }
+            else {
+                NameToVStreamParamsMap name_to_params = {name_params_pair};
+                results.emplace_back(output_stream, name_to_params);
+            }
+        }
+    }
+    // Add the mux streams (resolved from the collected edge params) to the results
+    hailo_status status = add_mux_streams_by_edges_names(results, outputs_edges_params);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return results;
+}
+
+// This function adds to results the OutputStreams that correspond to the edges in outputs_edges_params.
+// If an edge name appears in outputs_edges_params then all of its predecessors must appear in outputs_edges_params as well, Otherwise, an error is returned.
+// We use the set seen_edges in order to mark the edges already evaluated by one of their predecessors.
+hailo_status CoreOp::add_mux_streams_by_edges_names(OutputStreamWithParamsVector &results,
+    const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params)
+{
+    std::unordered_set<std::string> seen_edges;
+    for (auto &name_params_pair : outputs_edges_params) {
+        if (seen_edges.end() != seen_edges.find(name_params_pair.first)) {
+            // Edge has already been seen by one of its predecessors
+            continue;
+        }
+        auto output_streams = get_output_streams_by_vstream_name(name_params_pair.first);
+        CHECK_EXPECTED_AS_STATUS(output_streams);
+        CHECK(output_streams->size() == 1, HAILO_INVALID_ARGUMENT,
+            "mux streams cannot be separated into multiple streams");
+        auto output_stream = output_streams.release()[0];
+
+        // TODO: Find a better way to get the mux edges without creating OutputDemuxer
+        auto expected_demuxer = OutputDemuxer::create(*output_stream);
+        CHECK_EXPECTED_AS_STATUS(expected_demuxer);
+
+        // Collect the params of every edge of this demuxed stream; all edges must be present.
+        NameToVStreamParamsMap name_to_params;
+        for (auto &edge : expected_demuxer.value()->get_edges_stream_info()) {
+            auto edge_name_params_pair = outputs_edges_params.find(edge.name);
+            CHECK(edge_name_params_pair != outputs_edges_params.end(), HAILO_INVALID_ARGUMENT,
+                "All edges of stream {} must be in output vstream params. edge {} is missing.",
+                name_params_pair.first, edge.name);
+            seen_edges.insert(edge.name);
+            name_to_params.insert(*edge_name_params_pair);
+        }
+        results.emplace_back(output_stream, name_to_params);
+    }
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Resolves a vstream name into the low-level output streams that feed it.
+ * @return HAILO_NOT_FOUND if one of the resolved stream names has no matching stream.
+ */
+Expected<OutputStreamPtrVector> CoreOp::get_output_streams_by_vstream_name(const std::string &name)
+{
+    auto stream_names = m_metadata->get_stream_names_from_vstream_name(name);
+    CHECK_EXPECTED(stream_names);
+
+    OutputStreamPtrVector streams;
+    streams.reserve(stream_names->size());
+    for (const auto &low_level_name : stream_names.value()) {
+        CHECK_AS_EXPECTED(contains(m_output_streams, low_level_name), HAILO_NOT_FOUND);
+        streams.emplace_back(m_output_streams.at(low_level_name));
+    }
+
+    return streams;
+}
+
+/**
+ * Finds the layer info whose stream name matches @a stream_name.
+ * Fix: iterate by const reference instead of copying every LayerInfo during the
+ * search; only the matching entry is copied into the returned Expected.
+ * @return A copy of the matching LayerInfo, or HAILO_NOT_FOUND.
+ */
+Expected<LayerInfo> CoreOp::get_layer_info(const std::string &stream_name)
+{
+    for (const auto &layer_info : m_metadata->get_all_layer_infos()) {
+        if (layer_info.name == stream_name) {
+            return LayerInfo(layer_info);
+        }
+    }
+    LOGGER__ERROR("Failed to find layer with name {}", stream_name);
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+uint16_t CoreOp::get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params)
+{
+    // There are two possible situations:
+    // 1) All networks in the network group share the same configured (and hence smallest)
+    //    batch_size => we return that batch size.
+    // 2) Not all networks share the same configured batch_size. Currently, when using
+    //    dynamic_batch_sizes, all networks use the same dynamic_batch_size (until HRT-6535
+    //    is done), so the dynamic_batch_size must not exceed the smallest configured network
+    //    batch_size (resources are allocated for at most the configured network batch_size).
+
+    // HAILO_DEFAULT_BATCH_SIZE is skipped - it is not a real batch value; it indicates
+    // the scheduler should optimize batches by itself.
+    uint16_t smallest_batch_size = UINT16_MAX;
+    for (const auto &network_params_pair : config_params.network_params_by_name) {
+        const uint16_t batch_size = network_params_pair.second.batch_size;
+        if (HAILO_DEFAULT_BATCH_SIZE == batch_size) {
+            continue;
+        }
+        if (batch_size < smallest_batch_size) {
+            smallest_batch_size = batch_size;
+        }
+    }
+    // No non-default batch was configured => fall back to the default actual batch size.
+    return (UINT16_MAX == smallest_batch_size) ? DEFAULT_ACTUAL_BATCH_SIZE : smallest_batch_size;
+}
+
+/**
+ * Activates the core op with an explicit dynamic batch size, using the default
+ * (empty) network-group params.
+ */
+Expected<std::unique_ptr<ActivatedNetworkGroup>> CoreOp::activate_with_batch(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    return create_activated_network_group(HailoRTDefaults::get_active_network_group_params(), dynamic_batch_size,
+        resume_pending_stream_transfers);
+}
+
+// Returns the core-op name taken from the metadata.
+const std::string &CoreOp::name() const
+{
+    return m_metadata->core_op_name();
+}
+
+/**
+ * Activates every low-level input stream, then every output stream.
+ * Fails fast: returns the first activation error encountered.
+ */
+hailo_status CoreOp::activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    for (auto &input_pair : m_input_streams) {
+        const auto status = input_pair.second->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        CHECK_SUCCESS(status);
+    }
+    for (auto &output_pair : m_output_streams) {
+        const auto status = output_pair.second->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Deactivates all low-level streams (best effort): every stream is attempted even if
+ * some fail, and the status of the last failure (if any) is returned.
+ */
+hailo_status CoreOp::deactivate_low_level_streams()
+{
+    hailo_status last_failure = HAILO_SUCCESS;
+    for (auto &input_pair : m_input_streams) {
+        const auto stream_status = input_pair.second->deactivate_stream();
+        if (HAILO_SUCCESS != stream_status) {
+            LOGGER__ERROR("Failed to deactivate input stream {}", input_pair.first);
+            last_failure = stream_status;
+        }
+    }
+    for (auto &output_pair : m_output_streams) {
+        const auto stream_status = output_pair.second->deactivate_stream();
+        if (HAILO_SUCCESS != stream_status) {
+            LOGGER__ERROR("Failed to deactivate output stream {}", output_pair.first);
+            last_failure = stream_status;
+        }
+    }
+
+    return last_failure;
+}
+
+// Forwards to the metadata: all vstream names fed by the given low-level stream.
+Expected<std::vector<std::string>> CoreOp::get_vstream_names_from_stream_name(const std::string &stream_name)
+{
+    return m_metadata->get_vstream_names_from_stream_name(stream_name);
+}
+
+// Forwards to the metadata: the feature set this core op was compiled with.
+const SupportedFeatures &CoreOp::get_supported_features()
+{
+    return m_metadata->supported_features();
+}
+
+/**
+ * Returns the configured batch size of the network that @a stream_name belongs to.
+ * The stream's layer info provides its network name, which is then looked up in the
+ * configure params (map lookup instead of the previous linear inner scan).
+ * @return The batch size, or HAILO_NOT_FOUND if the stream/network is unknown.
+ */
+Expected<uint16_t> CoreOp::get_stream_batch_size(const std::string &stream_name)
+{
+    for (const auto &layer_info : m_metadata->get_all_layer_infos()) {
+        if (layer_info.name != stream_name) {
+            continue;
+        }
+        const auto network_params_pair = m_config_params.network_params_by_name.find(layer_info.network_name);
+        if (m_config_params.network_params_by_name.end() != network_params_pair) {
+            auto batch_size = network_params_pair->second.batch_size;
+            return batch_size;
+        }
+    }
+    LOGGER__ERROR("Failed to find network name output stream {}", stream_name);
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// True when the core op spans multiple contexts (taken from the compiled metadata).
+bool CoreOp::is_multi_context() const
+{
+    return m_metadata->supported_features().multi_context;
+}
+
+// Note: returns the configure params by value (a copy on every call).
+const ConfigureNetworkParams CoreOp::get_config_params() const
+{
+    return m_config_params;
+}
+
+/**
+ * Creates the input stream for @a stream_name according to the requested stream
+ * interface and registers it in m_input_streams.
+ * Fix: the unsupported-interface error message was garbled ("does not supports the
+ * given stream interface streams").
+ */
+hailo_status CoreOp::create_input_stream_from_config_params(Device &device,
+    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
+{
+    auto layer_info = get_layer_info(stream_name);
+    CHECK_EXPECTED_AS_STATUS(layer_info);
+
+    CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
+        "Device does not support the given stream interface. Please update input_stream_params for stream {}.",
+        stream_name);
+
+    switch (stream_params.stream_interface) {
+    case HAILO_STREAM_INTERFACE_PCIE:
+        // Fallthrough
+    case HAILO_STREAM_INTERFACE_INTEGRATED:
+        return create_vdma_input_stream(device, stream_name, layer_info.value(), stream_params);
+
+    case HAILO_STREAM_INTERFACE_ETH:
+    {
+        auto input_stream = EthernetInputStream::create(device,
+            layer_info.value(), stream_params.eth_input_params, m_core_op_activated_event);
+        CHECK_EXPECTED_AS_STATUS(input_stream);
+        m_input_streams.insert(make_pair(stream_name, input_stream.release()));
+        return HAILO_SUCCESS;
+    }
+
+    case HAILO_STREAM_INTERFACE_MIPI:
+    {
+        auto input_stream = MipiInputStream::create(device,
+            layer_info.value(), stream_params.mipi_input_params, m_core_op_activated_event);
+        CHECK_EXPECTED_AS_STATUS(input_stream);
+        m_input_streams.insert(make_pair(stream_name, input_stream.release()));
+        return HAILO_SUCCESS;
+    }
+
+    default:
+        LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
+        return HAILO_NOT_IMPLEMENTED;
+    }
+}
+
+/**
+ * Creates a vDMA-backed input stream bound to the stream's boundary channel.
+ * Fixes: use static_cast (not reinterpret_cast) for the checked Device downcast,
+ * and the error message wrongly said "output stream" in this input-stream path.
+ */
+hailo_status CoreOp::create_vdma_input_stream(Device &device, const std::string &stream_name,
+    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
+{
+    // Make sure the downcast is safe
+    CHECK((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
+        HAILO_INTERNAL_FAILURE, "Invalid device type");
+    VdmaDevice *vdma_device = static_cast<VdmaDevice*>(&device);
+
+    auto batch_size_exp = get_stream_batch_size(stream_name);
+    CHECK_EXPECTED_AS_STATUS(batch_size_exp);
+    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
+    CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr_exp, "Failed to get vdma channel for input stream {}", stream_name);
+
+    auto input_stream = VdmaInputStreamBase::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
+        layer_info, stream_params, batch_size_exp.value(), m_core_op_activated_event);
+    CHECK_EXPECTED_AS_STATUS(input_stream);
+    m_input_streams.insert(make_pair(stream_name, input_stream.release()));
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Creates the output stream for @a stream_name according to the requested stream
+ * interface and registers it in m_output_streams.
+ * Fixes: the error message was copy-pasted from the input path ("input_stream_params")
+ * and grammatically garbled.
+ */
+hailo_status CoreOp::create_output_stream_from_config_params(Device &device,
+    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
+{
+    auto layer_info = get_layer_info(stream_name);
+    CHECK_EXPECTED_AS_STATUS(layer_info);
+
+    CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
+        "Device does not support the given stream interface. Please update output_stream_params for stream {}.",
+        stream_name);
+
+    switch (stream_params.stream_interface) {
+    case HAILO_STREAM_INTERFACE_PCIE:
+        // Fallthrough
+    case HAILO_STREAM_INTERFACE_INTEGRATED:
+        return create_vdma_output_stream(device, stream_name, layer_info.value(), stream_params);
+
+    case HAILO_STREAM_INTERFACE_ETH:
+    {
+        auto output_stream = EthernetOutputStream::create(device,
+            layer_info.value(), stream_params.eth_output_params,
+            m_core_op_activated_event);
+        CHECK_EXPECTED_AS_STATUS(output_stream);
+        m_output_streams.insert(make_pair(stream_name, output_stream.release()));
+        return HAILO_SUCCESS;
+    }
+
+    default:
+        LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
+        return HAILO_NOT_IMPLEMENTED;
+    }
+}
+
+/**
+ * Creates a vDMA-backed output stream bound to the stream's boundary channel.
+ * Fix: use static_cast (not reinterpret_cast) for the checked Device downcast.
+ */
+hailo_status CoreOp::create_vdma_output_stream(Device &device, const std::string &stream_name,
+    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
+{
+    // Make sure the downcast is safe
+    CHECK((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
+        HAILO_INTERNAL_FAILURE, "Invalid device type");
+    VdmaDevice *vdma_device = static_cast<VdmaDevice*>(&device);
+
+    auto batch_size_exp = get_stream_batch_size(stream_name);
+    CHECK_EXPECTED_AS_STATUS(batch_size_exp);
+    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
+    CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr_exp, "Failed to get vdma channel for output stream {}", stream_name);
+
+    auto output_stream = VdmaOutputStreamBase::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
+        layer_info, batch_size_exp.value(), stream_params, m_core_op_activated_event);
+    CHECK_EXPECTED_AS_STATUS(output_stream);
+    m_output_streams.insert(make_pair(stream_name, output_stream.release()));
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Creates one low-level stream (input or output, by direction) per configured
+ * stream name. Fails on the first stream that cannot be created.
+ */
+hailo_status CoreOp::create_streams_from_config_params(Device &device)
+{
+    for (const auto &name_params_pair : m_config_params.stream_params_by_name) {
+        const auto &stream_name = name_params_pair.first;
+        const auto &stream_params = name_params_pair.second;
+
+        hailo_status status = HAILO_UNINITIALIZED;
+        if (HAILO_H2D_STREAM == stream_params.direction) {
+            status = create_input_stream_from_config_params(device, stream_params, stream_name);
+        } else if (HAILO_D2H_STREAM == stream_params.direction) {
+            status = create_output_stream_from_config_params(device, stream_params, stream_name);
+        } else {
+            LOGGER__ERROR("stream name {} direction is invalid.", stream_name);
+            return HAILO_INVALID_ARGUMENT;
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Returns references to the low-level input streams belonging to @a network_name
+ * (all networks when empty).
+ */
+Expected<InputStreamRefVector> CoreOp::get_input_streams_by_network(const std::string &network_name)
+{
+    auto stream_infos = m_metadata->get_input_stream_infos(network_name);
+    CHECK_EXPECTED(stream_infos);
+
+    InputStreamRefVector streams;
+    for (const auto &info : stream_infos.value()) {
+        auto stream_ref = get_input_stream_by_name(info.name);
+        CHECK_EXPECTED(stream_ref);
+        streams.push_back(stream_ref.release());
+    }
+    return streams;
+}
+
+/**
+ * Returns references to the low-level output streams belonging to @a network_name
+ * (all networks when empty).
+ */
+Expected<OutputStreamRefVector> CoreOp::get_output_streams_by_network(const std::string &network_name)
+{
+    auto stream_infos = m_metadata->get_output_stream_infos(network_name);
+    CHECK_EXPECTED(stream_infos);
+
+    OutputStreamRefVector streams;
+    for (const auto &info : stream_infos.value()) {
+        auto stream_ref = get_output_stream_by_name(info.name);
+        CHECK_EXPECTED(stream_ref);
+        streams.push_back(stream_ref.release());
+    }
+    return streams;
+}
+
+// Returns references to all low-level input streams (in map/name order).
+InputStreamRefVector CoreOp::get_input_streams()
+{
+    InputStreamRefVector refs;
+    refs.reserve(m_input_streams.size());
+    for (auto &stream_pair : m_input_streams) {
+        refs.emplace_back(std::ref(*stream_pair.second));
+    }
+    return refs;
+}
+
+// Returns references to all low-level output streams (in map/name order).
+OutputStreamRefVector CoreOp::get_output_streams()
+{
+    OutputStreamRefVector refs;
+    refs.reserve(m_output_streams.size());
+    for (auto &stream_pair : m_output_streams) {
+        refs.emplace_back(std::ref(*stream_pair.second));
+    }
+    return refs;
+}
+
+// Looks up a low-level input stream by name; HAILO_NOT_FOUND if it does not exist.
+ExpectedRef<InputStream> CoreOp::get_input_stream_by_name(const std::string& name)
+{
+    const auto found = m_input_streams.find(name);
+    if (found == m_input_streams.end()) {
+        LOGGER__ERROR("Input stream name {} not found", name);
+        return make_unexpected(HAILO_NOT_FOUND);
+    }
+
+    return std::ref<InputStream>(*found->second);
+}
+
+// Looks up a low-level output stream by name; HAILO_NOT_FOUND if it does not exist.
+ExpectedRef<OutputStream> CoreOp::get_output_stream_by_name(const std::string& name)
+{
+    const auto found = m_output_streams.find(name);
+    if (found == m_output_streams.end()) {
+        LOGGER__ERROR("Output stream name {} not found", name);
+        return make_unexpected(HAILO_NOT_FOUND);
+    }
+
+    return std::ref<OutputStream>(*found->second);
+}
+
+// Returns references to the input streams whose interface matches the requested one.
+std::vector<std::reference_wrapper<InputStream>> CoreOp::get_input_streams_by_interface(
+    hailo_stream_interface_t stream_interface)
+{
+    std::vector<std::reference_wrapper<InputStream>> matching_streams;
+    for (auto &stream_pair : m_input_streams) {
+        if (stream_pair.second->get_interface() == stream_interface) {
+            matching_streams.push_back(std::ref(*stream_pair.second));
+        }
+    }
+    return matching_streams;
+}
+
+// Returns references to the output streams whose interface matches the requested one.
+std::vector<std::reference_wrapper<OutputStream>> CoreOp::get_output_streams_by_interface(
+    hailo_stream_interface_t stream_interface)
+{
+    std::vector<std::reference_wrapper<OutputStream>> matching_streams;
+    for (auto &stream_pair : m_output_streams) {
+        if (stream_pair.second->get_interface() == stream_interface) {
+            matching_streams.push_back(std::ref(*stream_pair.second));
+        }
+    }
+    return matching_streams;
+}
+
+// Blocks until the core op is activated (or the timeout expires).
+hailo_status CoreOp::wait_for_activation(const std::chrono::milliseconds &timeout)
+{
+    return m_core_op_activated_event->wait(timeout);
+}
+
+/**
+ * Returns one group of vstream names per output stream - the vstreams that
+ * originate from that stream.
+ */
+Expected<std::vector<std::vector<std::string>>> CoreOp::get_output_vstream_groups()
+{
+    std::vector<std::vector<std::string>> groups;
+
+    for (const auto &output_stream : get_output_streams()) {
+        auto group = get_vstream_names_from_stream_name(output_stream.get().name());
+        CHECK_EXPECTED(group);
+        groups.push_back(group.release());
+    }
+
+    return groups;
+}
+
+/**
+ * Builds default output vstream params, partitioned so that vstreams originating
+ * from the same output stream land in the same params map (one map per group).
+ */
+Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> CoreOp::make_output_vstream_params_groups(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    auto params = make_output_vstream_params(quantized, format_type, timeout_ms, queue_size);
+    CHECK_EXPECTED(params);
+
+    auto groups = get_output_vstream_groups();
+    CHECK_EXPECTED(groups);
+
+    std::vector<std::map<std::string, hailo_vstream_params_t>> results(groups->size(), std::map<std::string, hailo_vstream_params_t>());
+
+    size_t pipeline_group_index = 0;
+    for (const auto &group : groups.release()) {
+        for (const auto &name_pair : params.value()) {
+            // A vstream's params are placed in the group (pipeline) it belongs to.
+            if (contains(group, name_pair.first)) {
+                results[pipeline_group_index].insert(name_pair);
+            }
+        }
+        pipeline_group_index++;
+    }
+
+    return results;
+}
+
+/**
+ * Builds default input vstream params for the given network (all networks when empty),
+ * filled in from the metadata's input vstream infos.
+ */
+Expected<std::map<std::string, hailo_vstream_params_t>> CoreOp::make_input_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    auto vstream_infos = m_metadata->get_input_vstream_infos(network_name);
+    CHECK_EXPECTED(vstream_infos);
+
+    std::map<std::string, hailo_vstream_params_t> params;
+    const auto status = Hef::Impl::fill_missing_vstream_params_with_default(params, vstream_infos.value(),
+        quantized, format_type, timeout_ms, queue_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return params;
+}
+
+/**
+ * Builds default output vstream params for the given network (all networks when empty),
+ * filled in from the metadata's output vstream infos.
+ */
+Expected<std::map<std::string, hailo_vstream_params_t>> CoreOp::make_output_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    auto vstream_infos = m_metadata->get_output_vstream_infos(network_name);
+    CHECK_EXPECTED(vstream_infos);
+
+    std::map<std::string, hailo_vstream_params_t> params;
+    const auto status = Hef::Impl::fill_missing_vstream_params_with_default(params, vstream_infos.value(),
+        quantized, format_type, timeout_ms, queue_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return params;
+}
+
+// The following getters forward directly to the core-op metadata.
+
+Expected<std::vector<hailo_network_info_t>> CoreOp::get_network_infos() const
+{
+    return m_metadata->get_network_infos();
+}
+
+Expected<std::vector<hailo_stream_info_t>> CoreOp::get_all_stream_infos(
+    const std::string &network_name) const
+{
+    return m_metadata->get_all_stream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> CoreOp::get_input_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_metadata->get_input_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> CoreOp::get_output_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_metadata->get_output_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> CoreOp::get_all_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_metadata->get_all_vstream_infos(network_name);
+}
+
+// Accumulator of measured activation durations.
+AccumulatorPtr CoreOp::get_activation_time_accumulator() const
+{
+    return m_activation_time_accumulator;
+}
+
+// Accumulator of measured deactivation durations.
+AccumulatorPtr CoreOp::get_deactivation_time_accumulator() const
+{
+    return m_deactivation_time_accumulator;
+}
+
+/**
+ * Returns the shared_ptr of the input stream with the given name.
+ * Fix: the error format string had a {} placeholder but no argument was passed.
+ */
+Expected<std::shared_ptr<InputStream>> CoreOp::get_shared_input_stream_by_name(const std::string &stream_name)
+{
+    CHECK_AS_EXPECTED(contains(m_input_streams, stream_name), HAILO_NOT_FOUND, "Input stream {} not found.", stream_name);
+    auto stream_ptr = m_input_streams.at(stream_name);
+    return stream_ptr;
+}
+
+/**
+ * Returns the shared_ptr of the output stream with the given name.
+ * Fix: the error format string had a {} placeholder but no argument was passed.
+ */
+Expected<std::shared_ptr<OutputStream>> CoreOp::get_shared_output_stream_by_name(const std::string &stream_name)
+{
+    CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND, "Output stream {} not found.", stream_name);
+    auto stream_ptr = m_output_streams.at(stream_name);
+    return stream_ptr;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file core_op.hpp
+ * @brief Core-op class hierarchy. The hierarchy is as follows:
+ * --------------------------------------------------------------------------------------------------------------
+ * | CoreOp | (Base classes)
+ * | ________________________________|________________________________ |
+ * | / | \ |
+ * | VdmaConfigCoreOp VDeviceCoreOp HcpConfigCoreOp | (Actual implementations)
+ * | | |
+ * | | |
+ * | vector of VdmaConfigCoreOp |
+ * -------------------------------------------------------------------------------------------------------------|
+ * | ActivatedCoreOp | (Base classes)
+ * | __________________________________|_____________________________________ |
+ * | / | \ |
+ * | VdmaConfigActivatedCoreOp VDeviceActivatedCoreOp HcpConfigActivatedCoreOp | (Actual implementations)
+ * | | |
+ * | | |
+ * | vector of VdmaConfigActivatedCoreOp |
+ * --------------------------------------------------------------------------------------------------------------
+ **/
+
+#ifndef _HAILO_CORE_OP_HPP_
+#define _HAILO_CORE_OP_HPP_
+
+#include "hailo/network_group.hpp"
+
+#include "common/latency_meter.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "hef/core_op_metadata.hpp"
+#include "control_protocol.h"
+#include "vdma/channel/boundary_channel.hpp"
+#include "core_op/active_core_op_holder.hpp"
+
+
+namespace hailort
+{
+/** Represents a vector of InputStream ptrs */
+using InputStreamPtrVector = std::vector<std::shared_ptr<InputStream>>;
+
+/** Represents a vector of OutputStream ptrs */
+using OutputStreamPtrVector = std::vector<std::shared_ptr<OutputStream>>;
+
+// ActivatedCoreOp is created with `hailo_activate_network_group_params_t` for legacy reasons.
+// Currently hailo_activate_network_group_params_t is an empty struct holder,
+// when adding parameters to it, consider `hailo_activate_network_group_params_t` should hold one core op in this case.
+class ActivatedCoreOp : public ActivatedNetworkGroup
+{
+public:
+    virtual ~ActivatedCoreOp() = default;
+    // Non-copyable; move-constructible only (move-assignment deleted).
+    ActivatedCoreOp(const ActivatedCoreOp &other) = delete;
+    ActivatedCoreOp &operator=(const ActivatedCoreOp &other) = delete;
+    ActivatedCoreOp &operator=(ActivatedCoreOp &&other) = delete;
+    ActivatedCoreOp(ActivatedCoreOp &&other) = default;
+
+    virtual uint32_t get_invalid_frames_count() override;
+
+protected:
+    hailo_activate_network_group_params_t m_network_group_params;
+
+    // The stream maps are held by reference - the referenced maps (owned by the CoreOp)
+    // must outlive this object.
+    ActivatedCoreOp(const hailo_activate_network_group_params_t &network_group_params,
+        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        EventPtr &&core_op_activated_event, hailo_status &status);
+
+    EventPtr m_core_op_activated_event;
+    std::map<std::string, std::shared_ptr<InputStream>> &m_input_streams;
+    std::map<std::string, std::shared_ptr<OutputStream>> &m_output_streams;
+
+private:
+    hailo_status validate_network_group_params(const hailo_activate_network_group_params_t &network_group_params);
+};
+
+
+/// Base class for a single core op: owns the low-level input/output streams and
+/// provides stream creation, activation helpers and stream/vstream queries on top of
+/// the CoreOpMetadata. Concrete subclasses (see the file-header diagram) implement
+/// the pure-virtual hooks.
+class CoreOp
+{
+public:
+    virtual ~CoreOp() = default;
+    CoreOp(const CoreOp &other) = delete;
+    CoreOp &operator=(const CoreOp &other) = delete;
+    CoreOp &operator=(CoreOp &&other) = delete;
+    CoreOp(CoreOp &&other) = default;
+
+    std::shared_ptr<CoreOpMetadata> metadata() {
+        return m_metadata;
+    }
+
+    // Activation entry points; both delegate to create_activated_network_group().
+    Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_with_batch(
+        uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
+        bool resume_pending_stream_transfers = false);
+    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(const hailo_activate_network_group_params_t &network_group_params);
+    virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout);
+
+    virtual const std::string &name() const;
+
+    // Scheduler hooks - implemented by subclasses (pure virtual here).
+    virtual bool is_scheduled() const = 0;
+    virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) = 0;
+    virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) = 0;
+    virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) = 0;
+    virtual Expected<hailo_stream_interface_t> get_default_streams_interface() = 0;
+
+    // Stream lookup/query helpers.
+    virtual Expected<InputStreamRefVector> get_input_streams_by_network(const std::string &network_name="");
+    virtual Expected<OutputStreamRefVector> get_output_streams_by_network(const std::string &network_name="");
+    virtual InputStreamRefVector get_input_streams();
+    virtual OutputStreamRefVector get_output_streams();
+    virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface);
+    virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface);
+    virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string& name);
+    virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string& name);
+    virtual Expected<OutputStreamWithParamsVector> get_output_streams_from_vstream_names(
+        const std::map<std::string, hailo_vstream_params_t> &outputs_params);
+    virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="");
+
+    // TODO: HRT-9546 - Remove func, should be only in CNG
+    virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name="");
+    // TODO: HRT-9546 - Remove func, should be only in CNG
+    virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name="");
+    // TODO: HRT-9546 - Remove func, should be only in CNG
+    virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+
+    // TODO: HRT-9546 - Remove func, should be only in CNG
+    virtual Expected<std::vector<std::vector<std::string>>> get_output_vstream_groups();
+
+    // TODO: HRT-9546 - Remove func, should be only in CNG
+    Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name);
+
+    // Actual device (de)activation - implemented by subclasses.
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers = false) = 0;
+    virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset = false) = 0;
+
+    // Metadata-backed info getters and stream creation.
+    virtual Expected<std::vector<hailo_network_info_t>> get_network_infos() const;
+    virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name="") const;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name="") const;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const;
+    virtual AccumulatorPtr get_activation_time_accumulator() const;
+    virtual AccumulatorPtr get_deactivation_time_accumulator() const;
+    hailo_status create_streams_from_config_params(Device &device);
+
+    virtual bool is_multi_context() const;
+    virtual const ConfigureNetworkParams get_config_params() const;
+
+
+    const SupportedFeatures &get_supported_features();
+    Expected<uint16_t> get_stream_batch_size(const std::string &stream_name);
+
+    // Low-level streams by name. Note: public members (accessed by friend classes below).
+    std::map<std::string, std::shared_ptr<InputStream>> m_input_streams;
+    std::map<std::string, std::shared_ptr<OutputStream>> m_output_streams;
+
+    // This function is called when a user is creating VStreams and is only relevant for VDeviceCoreOp.
+    // In case a user is using VdmaConfigCoreOp or HcpConfigCoreOp this function should do nothing.
+    virtual void set_vstreams_multiplexer_callbacks(std::vector<OutputVStream> &output_vstreams)
+    {
+        (void)output_vstreams;
+    }
+
+protected:
+    CoreOp(const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
+
+    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
+        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
+
+    hailo_status create_output_stream_from_config_params(Device &device,
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+    hailo_status create_input_stream_from_config_params(Device &device,
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+    hailo_status add_mux_streams_by_edges_names(OutputStreamWithParamsVector &result,
+        const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params);
+    Expected<OutputStreamPtrVector> get_output_streams_by_vstream_name(const std::string &name);
+
+    hailo_status activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers);
+    hailo_status deactivate_low_level_streams();
+
+    Expected<LayerInfo> get_layer_info(const std::string &stream_name);
+
+    virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() = 0;
+    virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) = 0;
+
+    const ConfigureNetworkParams m_config_params;
+    const uint16_t m_min_configured_batch_size; // TODO: remove after HRT-6535
+    EventPtr m_core_op_activated_event;
+    AccumulatorPtr m_activation_time_accumulator;
+    AccumulatorPtr m_deactivation_time_accumulator;
+    std::shared_ptr<CoreOpMetadata> m_metadata;
+
+private:
+    static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
+    hailo_status create_vdma_input_stream(Device &device, const std::string &stream_name,
+        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
+    hailo_status create_vdma_output_stream(Device &device, const std::string &stream_name,
+        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
+    Expected<std::shared_ptr<InputStream>> get_shared_input_stream_by_name(const std::string &stream_name);
+    Expected<std::shared_ptr<OutputStream>> get_shared_output_stream_by_name(const std::string &stream_name);
+
+    friend class VDeviceCoreOp; // VDeviceCoreOp is using protected members and functions from other CoreOps objects
+    friend class VDeviceActivatedCoreOp; // VDeviceActivatedCoreOp is calling CoreOp's protected function `create_activated_network_group`
+    friend class ConfiguredNetworkGroupBase;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CORE_OP_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file channel_allocator.cpp
 * @brief Allocates vdma channel indexes, allows reusing non-boundary channels between contexts.
+ **/
+
+#include "core_op/resource_manager/channel_allocator.hpp"
+
+
+namespace hailort
+{
+
// max_engines_count: number of available vdma engines; engine_index arguments passed to
// get_available_channel_id() are validated against this bound.
ChannelAllocator::ChannelAllocator(size_t max_engines_count) :
    m_max_engines_count(max_engines_count)
{}
+
+Expected<vdma::ChannelId> ChannelAllocator::get_available_channel_id(const LayerIdentifier &layer_identifier,
+ HailoRTDriver::DmaDirection direction, uint8_t engine_index)
+{
+ CHECK_AS_EXPECTED(engine_index < m_max_engines_count, HAILO_INVALID_ARGUMENT,
+ "Invalid engine index {}, max is {}", engine_index, m_max_engines_count);
+
+ const auto found_channel = m_allocated_channels.find(layer_identifier);
+ if (found_channel != m_allocated_channels.end()) {
+ CHECK_AS_EXPECTED(found_channel->second.engine_index == engine_index, HAILO_INTERNAL_FAILURE,
+ "Mismatch engine index");
+ return Expected<vdma::ChannelId>(found_channel->second);
+ }
+
+ // If we reach here, we need to allocate channel index for that layer.
+ std::set<vdma::ChannelId> currently_used_channel_indexes;
+ for (auto channel_id_pair : m_allocated_channels) {
+ currently_used_channel_indexes.insert(channel_id_pair.second);
+ }
+
+ uint8_t min_channel_index =
+ (direction == HailoRTDriver::DmaDirection::H2D) ? MIN_H2D_CHANNEL_INDEX : MIN_D2H_CHANNEL_INDEX;
+ uint8_t max_channel_index =
+ (direction == HailoRTDriver::DmaDirection::H2D) ? MAX_H2D_CHANNEL_INDEX : MAX_D2H_CHANNEL_INDEX;
+
+ for (uint8_t index = min_channel_index; index <= max_channel_index; ++index) {
+ const vdma::ChannelId channel_id = {engine_index, index};
+
+ // Check that the channel is not currently in use.
+ if (contains(currently_used_channel_indexes, channel_id)) {
+ continue;
+ }
+
+ // In the case of boundary channels, if the channel id was used in previous context as an internal channel (and
+ // it was freed, so it doesn't appear in `currently_used_channel_index`), we can't reuse it.
+ if (std::get<0>(layer_identifier) == LayerType::BOUNDARY) {
+ if (contains(m_internal_channel_ids, channel_id)) {
+ continue;
+ }
+ }
+
+ // Found it
+ insert_new_channel_id(layer_identifier, channel_id);
+ return Expected<vdma::ChannelId>(channel_id);
+ }
+
+ LOGGER__ERROR("Failed to get available channel_index");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+hailo_status ChannelAllocator::free_channel_index(const LayerIdentifier &layer_identifier)
+{
+ auto layer_channel_pair = m_allocated_channels.find(layer_identifier);
+ CHECK(m_allocated_channels.end() != layer_channel_pair, HAILO_INTERNAL_FAILURE, "Failed to free channel");
+ CHECK(std::get<0>(layer_channel_pair->first) != LayerType::BOUNDARY, HAILO_INTERNAL_FAILURE,
+ "Can't free boundary channels");
+
+ m_allocated_channels.erase(layer_channel_pair);
+ return HAILO_SUCCESS;
+}
+
+void ChannelAllocator::insert_new_channel_id(const LayerIdentifier &layer_identifier, const vdma::ChannelId &channel_id)
+{
+ if (LayerType::BOUNDARY == std::get<0>(layer_identifier)) {
+ m_boundary_channel_ids.insert(channel_id);
+ } else {
+ m_internal_channel_ids.insert(channel_id);
+ }
+
+ m_allocated_channels.emplace(layer_identifier, channel_id);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file channel_allocator.hpp
 * @brief Allocates vdma channel indexes, allows reusing non-boundary channels between contexts.
+ **/
+
+#ifndef _HAILO_CHANNEL_ALLOCATOR_HPP_
+#define _HAILO_CHANNEL_ALLOCATOR_HPP_
+
+#include "hailo/hailort.h"
+
+#include "vdma/memory/descriptor_list.hpp"
+#include "vdma/channel/channel_id.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+#include "hef/layer_info.hpp"
+
+#include <array>
+
+
+namespace hailort
+{
+
class ChannelAllocator final
{
public:
    // max_engines_count: number of available vdma engines; engine_index arguments are validated against it.
    explicit ChannelAllocator(size_t max_engines_count);
    ChannelAllocator(ChannelAllocator &&other) = default;

    // Returns the channel id already assigned to this layer, or allocates the first free index
    // in the direction's range on the given engine. For boundary layers, indexes previously used
    // by internal channels are not reused.
    Expected<vdma::ChannelId> get_available_channel_id(const LayerIdentifier &layer_identifier,
        HailoRTDriver::DmaDirection direction, uint8_t engine_index);
    // Releases a non-boundary layer's channel so its index can be reused in later contexts.
    hailo_status free_channel_index(const LayerIdentifier &layer_identifier);

private:
    void insert_new_channel_id(const LayerIdentifier &layer_identifier, const vdma::ChannelId &channel_id);

    const size_t m_max_engines_count;

    // Contains all channels that are currently used. These channels are released in free_channel_index.
    std::map<LayerIdentifier, vdma::ChannelId> m_allocated_channels;

    // Contains all channel ids allocated for the network group. These channels are never released.
    std::set<vdma::ChannelId> m_boundary_channel_ids;
    std::set<vdma::ChannelId> m_internal_channel_ids;
};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CHANNEL_ALLOCATOR_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file config_buffer.cpp
+ * @brief Manages configuration vdma buffer. The configuration buffer contains nn-configurations in a specific
+ * hw format (ccw).
+ */
+
+#include "core_op/resource_manager/config_buffer.hpp"
+#include "vdma/memory/sg_buffer.hpp"
+#include "vdma/memory/continuous_buffer.hpp"
+
+#include <numeric>
+
+
+namespace hailort {
+
+Expected<ConfigBuffer> ConfigBuffer::create(HailoRTDriver &driver, vdma::ChannelId channel_id,
+ const std::vector<uint32_t> &cfg_sizes)
+{
+ const auto buffer_size = std::accumulate(cfg_sizes.begin(), cfg_sizes.end(), 0);
+
+ auto buffer_ptr = should_use_ccb(driver) ?
+ create_ccb_buffer(driver, buffer_size) :
+ create_sg_buffer(driver, channel_id, cfg_sizes);
+ CHECK_EXPECTED(buffer_ptr);
+
+ return ConfigBuffer(buffer_ptr.release(), channel_id, buffer_size);
+}
+
// Takes ownership of the backing vdma buffer. Write/descriptor accumulators start at zero;
// they advance as write() and program_descriptors() are called.
ConfigBuffer::ConfigBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer,
    vdma::ChannelId channel_id, size_t total_buffer_size)
    : m_buffer(std::move(buffer)),
      m_channel_id(channel_id),
      m_total_buffer_size(total_buffer_size), m_acc_buffer_offset(0), m_acc_desc_count(0),
      m_current_buffer_size(0)
{}
+
+Expected<uint32_t> ConfigBuffer::program_descriptors()
+{
+ // TODO HRT-9657: remove DEVICE interrupts
+ auto descriptors_count =
+ m_buffer->program_descriptors(m_acc_buffer_offset, vdma::InterruptsDomain::DEVICE, m_acc_desc_count, false);
+ CHECK_EXPECTED(descriptors_count);
+
+ m_acc_desc_count += descriptors_count.value();
+ m_acc_buffer_offset = 0;
+
+ return descriptors_count;
+}
+
+hailo_status ConfigBuffer::pad_with_nops()
+{
+ static constexpr uint64_t CCW_NOP = 0x0;
+
+ auto page_size = desc_page_size();
+ auto buffer_size = m_total_buffer_size;
+ auto buffer_residue = buffer_size % page_size;
+ if (0 != buffer_residue % CCW_HEADER_SIZE) {
+ LOGGER__ERROR("CFG channel buffer size must be a multiple of CCW header size ({})", CCW_HEADER_SIZE);
+ return HAILO_INTERNAL_FAILURE;
+ }
+ /* If buffer does not fit info descriptor, the host must pad the buffer with CCW NOPs. */
+ auto nop_count = (buffer_residue == 0) ? 0 : ((page_size - buffer_residue) / CCW_HEADER_SIZE);
+ for (uint8_t nop_index = 0; nop_index < nop_count; nop_index++) {
+ /* Generate nop transaction.
+ CCW of all zeros (64'h0) should be treated as NOP - ignore CCW and expect CCW in next 64b word.
+ When CSM recognize it is a NOP it pops it from the channel FIFO without forward any address/data/command,
+ does not contribute to CRC calculations but return credits to the peripheral as usual. */
+ auto status = write_inner(MemoryView::create_const(reinterpret_cast<const void *>(&CCW_NOP), sizeof(CCW_NOP)));
+ CHECK_SUCCESS(status);
+ }
+ return HAILO_SUCCESS;
+}
+
+
+hailo_status ConfigBuffer::write(const MemoryView &data)
+{
+ CHECK(data.size() <= size_left(), HAILO_INTERNAL_FAILURE, "Write too many config words");
+ auto status = write_inner(data);
+ CHECK_SUCCESS(status);
+
+ m_current_buffer_size += data.size();
+ return HAILO_SUCCESS;
+}
+
// Bytes still writable before the buffer is full.
size_t ConfigBuffer::size_left() const
{
    assert(m_total_buffer_size >= m_current_buffer_size);
    return m_total_buffer_size - m_current_buffer_size;
}

// Bytes already written via write().
size_t ConfigBuffer::get_current_buffer_size() const
{
    return m_current_buffer_size;
}

// Descriptor page size of the underlying vdma buffer.
uint16_t ConfigBuffer::desc_page_size() const
{
    return m_buffer->desc_page_size();
}

// The config channel this buffer is bound to.
vdma::ChannelId ConfigBuffer::channel_id() const
{
    return m_channel_id;
}

// Buffer info for the firmware, sized by the bytes covered by already-programmed descriptors.
CONTROL_PROTOCOL__host_buffer_info_t ConfigBuffer::get_host_buffer_info() const
{
    return m_buffer->get_host_buffer_info(m_acc_desc_count * m_buffer->desc_page_size());
}
+
+hailo_status ConfigBuffer::write_inner(const MemoryView &data)
+{
+ size_t total_offset = (m_acc_desc_count * m_buffer->desc_page_size()) + m_acc_buffer_offset;
+ auto status = m_buffer->write(data.data(), data.size(), total_offset);
+ CHECK_SUCCESS(status);
+
+ m_acc_buffer_offset += data.size();
+ return HAILO_SUCCESS;
+}
+
+Expected<std::unique_ptr<vdma::VdmaBuffer>> ConfigBuffer::create_sg_buffer(HailoRTDriver &driver,
+ vdma::ChannelId channel_id, const std::vector<uint32_t> &cfg_sizes)
+{
+ auto desc_sizes_pair = vdma::DescriptorList::get_desc_buffer_sizes_for_multiple_transfers(driver, 1, cfg_sizes);
+ CHECK_EXPECTED(desc_sizes_pair);
+ const auto page_size = desc_sizes_pair->first;
+ const auto descs_count = desc_sizes_pair->second;
+
+ size_t buffer_size = 0;
+ for (const auto cfg_size : cfg_sizes) {
+ const auto descs_count_for_cfg = DIV_ROUND_UP(cfg_size, page_size);
+ buffer_size += descs_count_for_cfg * page_size;
+ }
+
+ auto buffer = vdma::SgBuffer::create(driver, buffer_size, descs_count, page_size, HailoRTDriver::DmaDirection::H2D,
+ channel_id);
+ CHECK_EXPECTED(buffer);
+
+ auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());
+ CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
+}
+
+Expected<std::unique_ptr<vdma::VdmaBuffer>> ConfigBuffer::create_ccb_buffer(HailoRTDriver &driver,
+ uint32_t buffer_size)
+{
+ buffer_size = vdma::ContinuousBuffer::get_buffer_size(buffer_size);
+ auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);
+ CHECK_EXPECTED(buffer);
+
+ auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());
+ CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
+}
+
+bool ConfigBuffer::should_use_ccb(HailoRTDriver &driver)
+{
+ switch (driver.dma_type()) {
+ case HailoRTDriver::DmaType::PCIE:
+ return false;
+ case HailoRTDriver::DmaType::DRAM:
+ if (std::getenv("HAILO_FORCE_CONF_CHANNEL_OVER_DESC") != nullptr) {
+ LOGGER__WARNING("Using desc instead of CCB for config channel is not optimal for performance.\n");
+ return false;
+ }
+ else {
+ return true;
+ }
+ }
+
+ // Shouldn't reach here
+ assert(false);
+ return false;
+}
+
} /* namespace hailort */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file config_buffer.hpp
+ * @brief Manages configuration vdma buffer. The configuration buffer contains nn-configurations in a specific
+ * hw format (ccw).
+ */
+
+#ifndef _HAILO_CONFIG_BUFFER_HPP_
+#define _HAILO_CONFIG_BUFFER_HPP_
+
+#include "hailo/buffer.hpp"
+
+#include "vdma/memory/vdma_buffer.hpp"
+
+
+namespace hailort {
+
+#define CCW_BYTES_IN_WORD (4)
+#define CCW_DATA_OFFSET (CCW_BYTES_IN_WORD * 2)
+#define CCW_HEADER_SIZE (CCW_DATA_OFFSET)
+
+
// Wraps a vdma buffer (scatter-gather or continuous) holding ccw configuration words for one
// config channel. Data is appended with write(); program_descriptors() then binds the pending
// bytes to descriptors so the device can fetch them.
class ConfigBuffer final
{
public:
    static Expected<ConfigBuffer> create(HailoRTDriver &driver, vdma::ChannelId channel_id,
        const std::vector<uint32_t> &cfg_sizes);

    // Write data to config channel
    hailo_status write(const MemoryView &data);

    // Program the descriptors for the data written so far
    Expected<uint32_t> program_descriptors();

    // On prefetch mode, we need to pad the config buffer with nops BEFORE the last write.
    hailo_status pad_with_nops();

    // Amount of bytes left to write into the buffer.
    size_t size_left() const;

    // Amount of bytes already written.
    size_t get_current_buffer_size() const;

    uint16_t desc_page_size() const;
    vdma::ChannelId channel_id() const;
    CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;

private:
    ConfigBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, vdma::ChannelId channel_id, size_t total_buffer_size);

    hailo_status write_inner(const MemoryView &data);

    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
        vdma::ChannelId channel_id, const std::vector<uint32_t> &cfg_sizes);
    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,
        uint32_t buffer_size);

    static bool should_use_ccb(HailoRTDriver &driver);

    std::unique_ptr<vdma::VdmaBuffer> m_buffer;
    vdma::ChannelId m_channel_id;
    const size_t m_total_buffer_size;
    size_t m_acc_buffer_offset;     // bytes written but not yet bound to descriptors
    uint32_t m_acc_desc_count;      // descriptors programmed so far
    size_t m_current_buffer_size;   // total bytes written
};
+
} /* namespace hailort */
+
+#endif /* _HAILO_CONFIG_BUFFER_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file context_switch_buffer_builder.cpp
+ * @brief Class used to build the context switch buffer sent to the firmware
+ **/
+
+#include "context_switch_buffer_builder.hpp"
+
+namespace hailort
+{
+
// Starts with a single empty control for the given context type; write_action() opens
// additional controls as the current one fills up.
ContextSwitchBufferBuilder::ContextSwitchBufferBuilder(CONTROL_PROTOCOL__context_switch_context_type_t context_type) :
    m_context_type(context_type)
{
    // Initialize first control
    start_new_control();
}
+
+void ContextSwitchBufferBuilder::write_action(MemoryView action)
+{
+ assert(action.size() < std::numeric_limits<uint32_t>::max());
+ const uint32_t action_size = static_cast<uint32_t>(action.size());
+
+ if (!has_space_for_action(action_size)) {
+ // Size exceeded single control size, creating a new control buffer.
+ start_new_control();
+ }
+
+ auto &control = current_control();
+ memcpy(&control.context_network_data[control.context_network_data_length], action.data(), action_size);
+ control.context_network_data_length += action_size;
+}
+
// All controls built so far, in the order they should be sent to the firmware.
const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &ContextSwitchBufferBuilder::get_controls() const
{
    return m_controls;
}

// The control currently being filled (always the last one; at least one exists after construction).
CONTROL_PROTOCOL__context_switch_context_info_single_control_t &ContextSwitchBufferBuilder::current_control()
{
    assert(!m_controls.empty());
    return m_controls.back();
}

// True if the current control can hold action_size more bytes of action data.
bool ContextSwitchBufferBuilder::has_space_for_action(uint32_t action_size)
{
    auto &control = current_control();
    return (control.context_network_data_length + action_size) <= ARRAY_ENTRIES(control.context_network_data);
}
+
+void ContextSwitchBufferBuilder::start_new_control()
+{
+ if (!m_controls.empty()) {
+ current_control().is_last_control_per_context = false;
+ }
+
+ // Creating a new control directly inside the vector to avoid copying the control struct.
+ m_controls.emplace_back();
+ auto &new_control = current_control();
+ new_control.context_network_data_length = 0;
+ new_control.context_type = static_cast<uint8_t>(m_context_type);
+ new_control.is_first_control_per_context = (1 == m_controls.size());
+ new_control.is_last_control_per_context = true;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file context_switch_buffer_builder.hpp
+ * @brief Class used to build the context switch buffer sent to the firmware.
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_
+#define _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_
+
+#include "hailo/hailort.h"
+
+#include "vdma/channel/channel_id.hpp"
+#include "device_common/control_protocol.hpp"
+#include "hef/layer_info.hpp"
+
+
+namespace hailort
+{
+
+// This class manages a vector of CONTROL_PROTOCOL__context_switch_context_info_single_control_t controls to be sent
+// to the firmware. Actions are written to the control buffer, until we reach the maximum control size, then we will
+// start a new control.
+class ContextSwitchBufferBuilder final {
+public:
+ ContextSwitchBufferBuilder(CONTROL_PROTOCOL__context_switch_context_type_t context_type);
+
+ void write_action(MemoryView action);
+ const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &get_controls() const;
+
+private:
+ CONTROL_PROTOCOL__context_switch_context_info_single_control_t ¤t_control();
+ bool has_space_for_action(uint32_t action_size);
+ void start_new_control();
+
+ CONTROL_PROTOCOL__context_switch_context_type_t m_context_type;
+ std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> m_controls;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_BUFFER_BUILDER_HPP_ */
--- /dev/null
+/**\r
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file ddr_channels_pair.cpp\r
+ **/\r
+\r
+#include "common/utils.hpp"\r
+\r
+#include "core_op/resource_manager/ddr_channels_pair.hpp"\r
+#include "vdma/memory/continuous_buffer.hpp"\r
+#include "vdma/memory/sg_buffer.hpp"\r
+\r
+\r
+namespace hailort\r
+{\r
+\r
+\r
// Creates a DDR channel pair: allocates the backing buffer (CCB or SG depending on dma type /
// env override) and programs its full descriptor list as a circular region with no interrupts.
Expected<DdrChannelsPair> DdrChannelsPair::create(HailoRTDriver &driver, const DdrChannelsInfo &ddr_channels_info)
{
    auto buffer_exp = should_use_ccb(driver) ?
        create_ccb_buffer(driver, ddr_channels_info.row_size, ddr_channels_info.min_buffered_rows) :
        create_sg_buffer(driver, ddr_channels_info.row_size, ddr_channels_info.min_buffered_rows, ddr_channels_info.d2h_channel_id);
    CHECK_EXPECTED(buffer_exp);
    auto buffer_ptr = buffer_exp.release();

    // Rows must align to descriptor pages so each row maps to a whole number of descriptors.
    CHECK_AS_EXPECTED(0 == (ddr_channels_info.row_size % buffer_ptr->desc_page_size()), HAILO_INTERNAL_FAILURE,
        "DDR channel buffer row size must be a multiple of descriptor page size");

    // Program the entire buffer once, circular (last arg true), with no interrupts.
    const auto interrupts_domain = vdma::InterruptsDomain::NONE;
    const auto total_size = buffer_ptr->descs_count() * buffer_ptr->desc_page_size();
    auto desc_count_local = buffer_ptr->program_descriptors(total_size, interrupts_domain, 0, true);
    CHECK_EXPECTED(desc_count_local);

    return DdrChannelsPair(std::move(buffer_ptr), ddr_channels_info);
}
+\r
// Number of descriptors in the underlying buffer, narrowed to the uint16_t used by the protocol.
uint16_t DdrChannelsPair::descs_count() const
{
    assert(IS_FIT_IN_UINT16(m_buffer->descs_count()));
    return static_cast<uint16_t>(m_buffer->descs_count());
}

// Host descriptors per frame: descriptors per row times buffers (rows) per frame.
uint32_t DdrChannelsPair::descriptors_per_frame() const
{
    return (m_info.row_size / m_buffer->desc_page_size()) * m_info.total_buffers_per_frame;
}
+\r
+Expected<Buffer> DdrChannelsPair::read() const\r
+{\r
+ const auto size = m_buffer->size();\r
+ auto res = Buffer::create(size);\r
+ CHECK_EXPECTED(res);\r
+\r
+ auto status = m_buffer->read(res->data(), size, 0);\r
+ CHECK_SUCCESS_AS_EXPECTED(status);\r
+\r
+ return res.release();\r
+}\r
+\r
// The channel-pair parameters this object was created with.
const DdrChannelsInfo& DdrChannelsPair::info() const
{
    return m_info;
}


// Scatter-gather buffers need firmware-driven credit management; continuous buffers do not.
bool DdrChannelsPair::need_manual_credit_management() const
{
    // On scatter gather manual credit management is needed
    return m_buffer->type() == vdma::VdmaBuffer::Type::SCATTER_GATHER;
}

// Buffer info for the firmware, with transfers sized to one row.
CONTROL_PROTOCOL__host_buffer_info_t DdrChannelsPair::get_host_buffer_info() const
{
    return m_buffer->get_host_buffer_info(m_info.row_size);
}
+\r
+Expected<std::unique_ptr<vdma::VdmaBuffer>> DdrChannelsPair::create_sg_buffer(HailoRTDriver &driver,\r
+ uint32_t row_size, uint16_t buffered_rows, vdma::ChannelId d2h_channel_id)\r
+{\r
+ auto desc_sizes_pair = vdma::DescriptorList::get_desc_buffer_sizes_for_single_transfer(driver,\r
+ buffered_rows, buffered_rows, row_size);\r
+ CHECK_EXPECTED(desc_sizes_pair);\r
+ const auto desc_page_size = desc_sizes_pair->first;\r
+ const auto descs_count = desc_sizes_pair->second;\r
+ // DdrChannels are circular so we need to allocate the full descriptors list.\r
+ const auto buffer_size = desc_page_size * descs_count;\r
+\r
+ auto buffer = vdma::SgBuffer::create(driver, buffer_size, descs_count, desc_page_size,\r
+ HailoRTDriver::DmaDirection::BOTH, d2h_channel_id);\r
+ CHECK_EXPECTED(buffer);\r
+\r
+ auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());\r
+ CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);\r
+\r
+ return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));\r
+}\r
+\r
// Takes ownership of the (already programmed) backing buffer; see create().
DdrChannelsPair::DdrChannelsPair(std::unique_ptr<vdma::VdmaBuffer> &&buffer, const DdrChannelsInfo &ddr_channels_info) :
    m_buffer(std::move(buffer)),
    m_info(ddr_channels_info)
{}
+\r
Expected<std::unique_ptr<vdma::VdmaBuffer>> DdrChannelsPair::create_ccb_buffer(HailoRTDriver &driver,
    uint32_t row_size, uint16_t buffered_rows)
{
    // The first 12 channels in D2H CCB ("regular channels") require that the amount of descriptors will be a power
    // of 2. Although the 4 last channels ("enhanced channels") don't have this requirement, we keep the code the same.
    auto buffer_size = vdma::ContinuousBuffer::get_buffer_size_desc_power2(row_size * buffered_rows);
    auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);
    CHECK_EXPECTED(buffer);

    auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());
    CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);

    return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
}
+\r
+bool DdrChannelsPair::should_use_ccb(HailoRTDriver &driver)\r
+{\r
+ switch (driver.dma_type()) {\r
+ case HailoRTDriver::DmaType::PCIE:\r
+ return false;\r
+ case HailoRTDriver::DmaType::DRAM:\r
+ if (std::getenv("HAILO_FORCE_DDR_CHANNEL_OVER_DESC") != nullptr) {\r
+ LOGGER__WARNING("Using desc instead of CCB for ddr channel is not optimal for performance.\n");\r
+ return false;\r
+ }\r
+ else {\r
+ return true;\r
+ }\r
+ }\r
+\r
+ // Shouldn't reach here\r
+ assert(false);\r
+ return false;\r
+}\r
+\r
+} /* namespace hailort */\r
--- /dev/null
+/**\r
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file ddr_channels_pair.hpp\r
+ * @brief DDR channel pairs are pair of vdma channels used in the same context for skip-connection.\r
+ **/\r
+\r
+#ifndef _HAILO_DDR_CHANNELS_PAIR_HPP_\r
+#define _HAILO_DDR_CHANNELS_PAIR_HPP_\r
+\r
+#include "hailo/hailort.h"\r
+#include "hailo/buffer.hpp"\r
+\r
+#include "vdma/memory/vdma_buffer.hpp"\r
+\r
+\r
+namespace hailort\r
+{\r
+\r
// Parameters describing one DDR (skip-connection) channel pair within a context.
struct DdrChannelsInfo
{
    vdma::ChannelId d2h_channel_id;
    uint8_t d2h_stream_index;
    vdma::ChannelId h2d_channel_id;
    uint8_t h2d_stream_index;
    uint8_t network_index;
    uint16_t row_size;              // bytes per row; must be a multiple of the descriptor page size
    uint16_t min_buffered_rows;     // rows the host buffer must be able to hold
    // total_buffers_per_frame is not the same as core_buffers_per_frame
    // (in DDR, core buffers per frame is 1). Used to calculate total host descriptors_per_frame.
    uint16_t total_buffers_per_frame;
};
+\r
// Owns the host buffer shared by a pair of vdma channels used for skip-connection data
// within a single context (device writes via d2h, reads back via h2d).
class DdrChannelsPair final
{
public:
    static Expected<DdrChannelsPair> create(HailoRTDriver &driver, const DdrChannelsInfo &ddr_channels_info);

    uint16_t descs_count() const;
    uint32_t descriptors_per_frame() const;
    Expected<Buffer> read() const;
    const DdrChannelsInfo & info() const;

    // Checks if the credits are automatically going from d2h channel to its h2d channel, or it needs to be done
    // manually (using a fw task).
    bool need_manual_credit_management() const;

    CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;

private:
    DdrChannelsPair(std::unique_ptr<vdma::VdmaBuffer> &&buffer, const DdrChannelsInfo &ddr_channels_info);

    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
        uint32_t row_size, uint16_t buffered_rows, vdma::ChannelId d2h_channel_id);
    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,
        uint32_t row_size, uint16_t buffered_rows);

    static bool should_use_ccb(HailoRTDriver &driver);

    std::unique_ptr<vdma::VdmaBuffer> m_buffer;
    DdrChannelsInfo m_info;
};
+\r
+} /* namespace hailort */\r
+\r
+#endif /* _HAILO_DDR_CHANNELS_PAIR_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file inter_context_buffer.cpp
+ * @brief Manages inter-context buffer.
+ */
+
+#include "core_op/resource_manager/resource_manager.hpp"
+#include "core_op/resource_manager/inter_context_buffer.hpp"
+#include "vdma/memory/sg_buffer.hpp"
+#include "vdma/memory/continuous_buffer.hpp"
+
+
+namespace hailort
+{
+
// Creates an inter-context buffer sized for max_batch_size transfers and programs its
// descriptors: one non-circular region per batch item, with a DEVICE interrupt only on the
// last item of the batch.
Expected<InterContextBuffer> InterContextBuffer::create(HailoRTDriver &driver, uint32_t transfer_size,
    uint16_t max_batch_size, vdma::ChannelId d2h_channel_id)
{
    auto buffer_exp = should_use_ccb(driver) ?
        create_ccb_buffer(driver, transfer_size, max_batch_size) :
        create_sg_buffer(driver, transfer_size, max_batch_size, d2h_channel_id);
    CHECK_EXPECTED(buffer_exp);
    auto buffer_ptr = buffer_exp.release();

    // acc_offset accumulates programmed descriptors and is passed as the next region's offset
    // (same units program_descriptors() uses for its offset parameter elsewhere in this file).
    size_t acc_offset = 0;
    for (uint16_t i = 0; i < max_batch_size; i++) {
        const auto last_desc_interrupts_domain = ((max_batch_size - 1) == i) ?
            vdma::InterruptsDomain::DEVICE : vdma::InterruptsDomain::NONE;
        static const auto BUFFER_NOT_CIRCULAR = false;
        auto desc_count_local = buffer_ptr->program_descriptors(transfer_size, last_desc_interrupts_domain, acc_offset,
            BUFFER_NOT_CIRCULAR);
        CHECK_EXPECTED(desc_count_local, "Failed to program descs for inter context channels. Given max_batch_size is too big.");
        acc_offset += desc_count_local.value();
    }

    return InterContextBuffer(std::move(buffer_ptr), transfer_size, max_batch_size);
}
+
// Moves the end-of-batch DEVICE interrupt from the previous dynamic batch size to the new one.
// The order matters: first clear the old interrupt position, then set the new one.
hailo_status InterContextBuffer::reprogram(uint16_t batch_size)
{
    const auto prev_batch_size = m_dynamic_batch_size;
    auto status = set_dynamic_batch_size(batch_size);
    CHECK_SUCCESS(status);

    if (prev_batch_size == m_dynamic_batch_size) {
        LOGGER__TRACE("Batch size hasn't changed ({}); nothing to be done.", batch_size);
        return HAILO_SUCCESS;
    }

    // Remove the interrupt that marked the end of the old batch...
    status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, prev_batch_size,
        vdma::InterruptsDomain::NONE);
    CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the previous batch (size {})",
        prev_batch_size);
    // ...and raise one at the end of the new batch.
    status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, m_dynamic_batch_size,
        vdma::InterruptsDomain::DEVICE);
    CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the current batch (size {})",
        m_dynamic_batch_size);

    return HAILO_SUCCESS;
}
+
+Expected<Buffer> InterContextBuffer::read()
+{
+ const auto size = m_transfer_size * m_dynamic_batch_size;
+ assert(size <= m_buffer->size());
+
+ auto res = Buffer::create(size);
+ CHECK_EXPECTED(res);
+
+ auto status = m_buffer->read(res->data(), size, 0);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return res.release();
+}
+
// Buffer info for the firmware, with transfers sized to a single inter-context transfer.
CONTROL_PROTOCOL__host_buffer_info_t InterContextBuffer::get_host_buffer_info() const
{
    return m_buffer->get_host_buffer_info(m_transfer_size);
}

// Takes ownership of the backing buffer; dynamic batch size starts at the configured maximum.
InterContextBuffer::InterContextBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size,
    uint16_t batch_size) :
    m_buffer(std::move(buffer)),
    m_transfer_size(transfer_size),
    m_max_batch_size(batch_size),
    m_dynamic_batch_size(batch_size)
{}
+
+hailo_status InterContextBuffer::set_dynamic_batch_size(uint16_t batch_size)
+{
+ if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == batch_size) {
+ LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == batch_size; "
+ "Leaving previously set value of {}", m_dynamic_batch_size);
+ } else {
+ CHECK(batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
+ "batch_size ({}) must be <= than m_max_batch_size ({})",
+ batch_size, m_max_batch_size);
+
+ LOGGER__TRACE("Setting intermediate buffer's batch_size to {}", batch_size);
+ m_dynamic_batch_size = batch_size;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+Expected<std::unique_ptr<vdma::VdmaBuffer>> InterContextBuffer::create_sg_buffer(HailoRTDriver &driver,
+ uint32_t transfer_size, uint16_t batch_size, vdma::ChannelId d2h_channel_id)
+{
+ auto desc_sizes_pair = vdma::DescriptorList::get_desc_buffer_sizes_for_single_transfer(driver,
+ batch_size, batch_size, transfer_size);
+ CHECK_EXPECTED(desc_sizes_pair);
+ const auto desc_page_size = desc_sizes_pair->first;
+ const auto descs_count = desc_sizes_pair->second;
+
+ // TODO: HRT-9914 - Instead of using aligned descriptor for each transfer, we should do it for the all frame.
+ const size_t desc_per_transfer = DIV_ROUND_UP(transfer_size, desc_page_size);
+ const size_t buffer_size = desc_per_transfer * desc_page_size * batch_size;
+ auto buffer = vdma::SgBuffer::create(driver, buffer_size, descs_count, desc_page_size,
+ HailoRTDriver::DmaDirection::BOTH, d2h_channel_id);
+ CHECK_EXPECTED(buffer);
+
+ auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());
+ CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
+}
+
+Expected<std::unique_ptr<vdma::VdmaBuffer>> InterContextBuffer::create_ccb_buffer(HailoRTDriver &driver,
+ uint32_t transfer_size, uint16_t batch_size)
+{
+ // The first 12 channels in D2H CCB ("regular channels") requires that the amount of descriptors will be a power
+ // of 2. Altough the 4 last channels ("enhanced channels") don't have this requirements, we keep the code the same.
+ auto buffer_size = vdma::ContinuousBuffer::get_buffer_size_desc_power2(transfer_size * batch_size);
+ auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);
+ CHECK_EXPECTED(buffer);
+
+ auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());
+ CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
+}
+
+bool InterContextBuffer::should_use_ccb(HailoRTDriver &driver)
+{
+ switch (driver.dma_type()) {
+ case HailoRTDriver::DmaType::PCIE:
+ return false;
+ case HailoRTDriver::DmaType::DRAM:
+ if (nullptr == std::getenv("HAILO_FORCE_INFER_CONTEXT_CHANNEL_OVER_DESC")) {
+ return false;
+ }
+ else {
+ LOGGER__INFO("Using (non default mode) CCB for inter context channels.\n");
+ return true;
+ }
+ }
+
+ // Shouldn't reach here
+ assert(false);
+ return false;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file inter_context_buffer.hpp
+ * @brief Manages inter-context buffer.
+ */
+
+#ifndef _HAILO_INTER_CONTEXT_BUFFER_HPP_
+#define _HAILO_INTER_CONTEXT_BUFFER_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/buffer.hpp"
+
+#include "os/hailort_driver.hpp"
+#include "vdma/memory/vdma_buffer.hpp"
+
+#include "control_protocol.h"
+
+
+namespace hailort
+{
+
// Host buffer passing intermediate activations between two contexts of a network group.
// Sized for max_batch_size transfers; the active batch can be shrunk at runtime via reprogram().
class InterContextBuffer final {
public:
    static Expected<InterContextBuffer> create(HailoRTDriver &driver, uint32_t transfer_size,
        uint16_t max_batch_size, vdma::ChannelId d2h_channel_id);

    // Moves the end-of-batch device interrupt to match the new dynamic batch size.
    hailo_status reprogram(uint16_t batch_size);
    // Copies the active portion of the buffer into a host buffer (inspection helper).
    Expected<Buffer> read();

    CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;

private:
    InterContextBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size, uint16_t batch_size);
    hailo_status set_dynamic_batch_size(uint16_t batch_size);

    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
        uint32_t transfer_size, uint16_t batch_size, vdma::ChannelId d2h_channel_id);
    static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,
        uint32_t transfer_size, uint16_t batch_size);

    static bool should_use_ccb(HailoRTDriver &driver);

    std::unique_ptr<vdma::VdmaBuffer> m_buffer;
    const uint32_t m_transfer_size;
    const uint16_t m_max_batch_size;        // upper bound, fixed at creation
    uint16_t m_dynamic_batch_size;          // currently active batch size (<= max)
};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_INTER_CONTEXT_BUFFER_HPP_ */
\ No newline at end of file
--- /dev/null
+#include "hailo/hailort_defaults.hpp"
+
+#include "core_op/resource_manager/resource_manager.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+#include "device_common/control.hpp"
+
+#include <numeric>
+
+
+namespace hailort
+{
+
+// Creates the per-context resource holder: one ConfigBuffer per config stream,
+// each bound to a pre-allocated config channel id (ids are shared across all
+// contexts, so there may be more channel ids than buffers for this context).
+Expected<ContextResources> ContextResources::create(HailoRTDriver &driver,
+    CONTROL_PROTOCOL__context_switch_context_type_t context_type, const std::vector<vdma::ChannelId> &config_channels_ids,
+    const ConfigBufferInfoMap &config_buffer_infos)
+{
+    CHECK_AS_EXPECTED(context_type < CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_COUNT, HAILO_INVALID_ARGUMENT);
+
+    CHECK_AS_EXPECTED(config_buffer_infos.size() <= config_channels_ids.size(), HAILO_INTERNAL_FAILURE,
+        "config_buffer_infos size ({}) is bigger than config_channels_id count ({})",
+        config_buffer_infos.size(), config_channels_ids.size());
+
+    std::vector<ConfigBuffer> config_buffers;
+    config_buffers.reserve(config_buffer_infos.size());
+    for (uint8_t config_stream_index = 0; config_stream_index < config_buffer_infos.size(); config_stream_index++) {
+        auto buffer_resource = ConfigBuffer::create(driver, config_channels_ids[config_stream_index],
+            config_buffer_infos.at(config_stream_index));
+        CHECK_EXPECTED(buffer_resource);
+        config_buffers.emplace_back(buffer_resource.release());
+    }
+
+    return ContextResources(driver, context_type, std::move(config_buffers));
+}
+
+// Returns the context-switch control records accumulated so far by the builder.
+const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &ContextResources::get_controls() const
+{
+    return m_builder.get_controls();
+}
+
+// Mutable access to the builder used to append context-switch controls.
+ContextSwitchBufferBuilder &ContextResources::builder()
+{
+    return m_builder;
+}
+
+// Registers an edge layer of this context together with the vdma channel that
+// serves it and the host-side buffer description reported to the firmware.
+void ContextResources::add_edge_layer(const LayerInfo &layer_info, vdma::ChannelId channel_id,
+    const CONTROL_PROTOCOL__host_buffer_info_t &buffer_info)
+{
+    m_edge_layers.emplace_back(EdgeLayer{
+        layer_info,
+        channel_id,
+        buffer_info
+    });
+}
+
+// Returns a copy of all edge layers registered on this context.
+std::vector<EdgeLayer> ContextResources::get_edge_layers() const
+{
+    return m_edge_layers;
+}
+
+// Filters by layer type only (any stream direction).
+std::vector<EdgeLayer> ContextResources::get_edge_layers(LayerType layer_type) const
+{
+    return get_edge_layers(layer_type, HAILO_STREAM_DIRECTION_MAX_ENUM);
+}
+
+// Filters by stream direction only (any layer type).
+std::vector<EdgeLayer> ContextResources::get_edge_layers(hailo_stream_direction_t direction) const
+{
+    return get_edge_layers(LayerType::NOT_SET, direction);
+}
+
+// Returns the edge layers matching both filters. LayerType::NOT_SET and
+// HAILO_STREAM_DIRECTION_MAX_ENUM act as wildcards for their respective field.
+std::vector<EdgeLayer> ContextResources::get_edge_layers(LayerType layer_type, hailo_stream_direction_t direction) const
+{
+    std::vector<EdgeLayer> edge_layers;
+    for (const auto &edge_layer : m_edge_layers) {
+        const bool layer_type_ok = (layer_type == LayerType::NOT_SET) || (edge_layer.layer_info.type == layer_type);
+        const bool direction_ok = (direction == HAILO_STREAM_DIRECTION_MAX_ENUM) || (edge_layer.layer_info.direction == direction);
+        if (layer_type_ok && direction_ok) {
+            edge_layers.emplace_back(edge_layer);
+        }
+    }
+    return edge_layers;
+}
+
+// Linear search for the edge layer bound to the given stream index; returns a
+// copy, or HAILO_INTERNAL_FAILURE if no such layer was registered.
+Expected<EdgeLayer> ContextResources::get_edge_layer_by_stream_index(uint8_t stream_index) const
+{
+    for (const auto &edge_layer : m_edge_layers) {
+        if (edge_layer.layer_info.stream_index == stream_index) {
+            return EdgeLayer(edge_layer);
+        }
+    }
+
+    LOGGER__ERROR("Edge layer does not exists for stream {}", stream_index);
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+
+// Allocates a DDR channels pair for this context and keeps ownership of it;
+// the returned reference stays valid for the lifetime of this object.
+ExpectedRef<DdrChannelsPair> ContextResources::create_ddr_channels_pair(const DdrChannelsInfo &ddr_info)
+{
+    auto buffer = DdrChannelsPair::create(m_driver, ddr_info);
+    CHECK_EXPECTED(buffer);
+
+    m_ddr_channels_pairs.emplace_back(buffer.release());
+    return std::ref(m_ddr_channels_pairs.back());
+}
+
+// Finds the DDR channels pair whose D2H side uses the given stream index.
+ExpectedRef<const DdrChannelsPair> ContextResources::get_ddr_channels_pair(uint8_t d2h_stream_index) const
+{
+    for (auto &ddr_channels_pair : m_ddr_channels_pairs) {
+        if (ddr_channels_pair.info().d2h_stream_index == d2h_stream_index) {
+            return std::ref(ddr_channels_pair);
+        }
+    }
+
+    LOGGER__ERROR("Couldn't find ddr channels pair for {}", d2h_stream_index);
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+// All DDR channel pairs allocated on this context.
+const std::vector<DdrChannelsPair> &ContextResources::get_ddr_channels_pairs() const
+{
+    return m_ddr_channels_pairs;
+}
+
+// Validates that no two edge layers in this context were assigned the same
+// vdma channel id.
+hailo_status ContextResources::validate_edge_layers()
+{
+    std::set<vdma::ChannelId> used_channel_ids;
+    for (const auto &edge_layer : m_edge_layers) {
+        // set::insert reports a duplicate via .second - single lookup instead
+        // of find() followed by insert().
+        CHECK(used_channel_ids.insert(edge_layer.channel_id).second, HAILO_INTERNAL_FAILURE,
+            "Multiple streams use the same channel id {}", edge_layer.channel_id);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Mutable access to the per-config-stream buffers of this context.
+std::vector<ConfigBuffer> &ContextResources::get_config_buffers()
+{
+    return m_config_buffers;
+}
+
+static Expected<LatencyMeterPtr> create_hw_latency_meter(const std::vector<LayerInfo> &layers)
+{
+ std::set<std::string> d2h_channel_names;
+
+ size_t h2d_streams_count = 0;
+ for (const auto &layer : layers) {
+ if (layer.direction == HAILO_D2H_STREAM) {
+ if (HAILO_FORMAT_ORDER_HAILO_NMS == layer.format.order) {
+ LOGGER__WARNING("HW Latency measurement is not supported on NMS networks");
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ d2h_channel_names.insert(layer.name);
+ }
+ else {
+ h2d_streams_count++;
+ }
+ }
+
+ if (h2d_streams_count > 1) {
+ LOGGER__WARNING("HW Latency measurement is supported on networks with a single input");
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ return make_shared_nothrow<LatencyMeter>(d2h_channel_names, MAX_IRQ_TIMESTAMPS_SIZE);
+}
+
+// Builds one latency meter per network when HAILO_LATENCY_MEASURE is requested.
+// Networks that can't support HW latency measurement are skipped silently
+// (best effort) instead of failing the whole configuration.
+static Expected<LatencyMetersMap> create_latency_meters_from_config_params(
+    const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> core_op_metadata)
+{
+    LatencyMetersMap latency_meters_map;
+
+    if ((config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) {
+        // Best effort for starting latency meter.
+        auto networks_names = core_op_metadata->get_network_names();
+        for (auto &network_name : networks_names) {
+            auto layer_infos = core_op_metadata->get_all_layer_infos(network_name);
+            CHECK_EXPECTED(layer_infos);
+            auto latency_meter = create_hw_latency_meter(layer_infos.value());
+            if (latency_meter) {
+                latency_meters_map.emplace(network_name, latency_meter.release());
+                LOGGER__DEBUG("Starting hw latency measurement for network {}", network_name);
+            }
+        }
+    }
+
+    return latency_meters_map;
+}
+
+// Factory: pre-allocates the config channel ids (shared across all contexts)
+// and the optional latency meters, then constructs the manager.
+Expected<ResourcesManager> ResourcesManager::create(VdmaDevice &vdma_device, HailoRTDriver &driver,
+    const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> core_op_metadata,
+    uint8_t core_op_index)
+{
+    // Allocate config channels. In order to use the same channel ids for config channels in all contexts,
+    // we allocate all of them here, and use in preliminary/dynamic context.
+    ChannelAllocator allocator(driver.dma_engines_count());
+    std::vector<vdma::ChannelId> config_channels_ids;
+    const auto &config_channels_info = core_op_metadata->config_channels_info();
+    config_channels_ids.reserve(config_channels_info.size());
+    for (uint8_t cfg_index = 0; cfg_index < config_channels_info.size(); cfg_index++) {
+        const auto layer_identifier = std::make_tuple(LayerType::CFG, "", cfg_index);
+        const auto engine_index = config_channels_info[cfg_index].engine_index;
+        auto channel_id = allocator.get_available_channel_id(layer_identifier, HailoRTDriver::DmaDirection::H2D, engine_index);
+        CHECK_EXPECTED(channel_id);
+        config_channels_ids.push_back(channel_id.release());
+    }
+
+    // Network names double as the index->name map used by fill_network_batch_size.
+    auto network_index_map = core_op_metadata->get_network_names();
+
+    auto latency_meters = create_latency_meters_from_config_params(config_params, core_op_metadata);
+    CHECK_EXPECTED(latency_meters);
+    ResourcesManager resources_manager(vdma_device, driver, std::move(allocator), config_params,
+        std::move(core_op_metadata), core_op_index,
+        std::move(network_index_map), latency_meters.release(), std::move(config_channels_ids));
+
+    return resources_manager;
+}
+
+// Private constructor - all validation/allocation happens in create().
+ResourcesManager::ResourcesManager(VdmaDevice &vdma_device, HailoRTDriver &driver,
+    ChannelAllocator &&channel_allocator, const ConfigureNetworkParams config_params,
+    std::shared_ptr<CoreOpMetadata> &&core_op_metadata,
+    uint8_t core_op_index, const std::vector<std::string> &&network_index_map,
+    LatencyMetersMap &&latency_meters,
+    std::vector<vdma::ChannelId> &&config_channels_ids) :
+    m_contexts_resources(),
+    m_channel_allocator(std::move(channel_allocator)),
+    m_vdma_device(vdma_device),
+    m_driver(driver),
+    m_config_params(config_params),
+    m_inter_context_buffers(),
+    m_core_op_metadata(std::move(core_op_metadata)),
+    m_core_op_index(core_op_index),
+    m_dynamic_context_count(0),
+    m_total_context_count(0),
+    m_network_index_map(std::move(network_index_map)),
+    m_latency_meters(std::move(latency_meters)),
+    m_boundary_channels(),
+    m_is_configured(false),
+    m_config_channels_ids(std::move(config_channels_ids)),
+    m_hw_only_boundary_buffers()
+{}
+
+// Move constructor. std::exchange zeroes the counters and the configured flag
+// on the moved-from object so it is left in a benign, unconfigured state.
+ResourcesManager::ResourcesManager(ResourcesManager &&other) noexcept :
+    m_contexts_resources(std::move(other.m_contexts_resources)),
+    m_channel_allocator(std::move(other.m_channel_allocator)),
+    m_vdma_device(other.m_vdma_device),
+    m_driver(other.m_driver),
+    m_config_params(other.m_config_params),
+    m_inter_context_buffers(std::move(other.m_inter_context_buffers)),
+    m_core_op_metadata(std::move(other.m_core_op_metadata)),
+    m_core_op_index(other.m_core_op_index),
+    m_dynamic_context_count(std::exchange(other.m_dynamic_context_count, static_cast<uint8_t>(0))),
+    m_total_context_count(std::exchange(other.m_total_context_count, static_cast<uint8_t>(0))),
+    m_network_index_map(std::move(other.m_network_index_map)),
+    m_latency_meters(std::move(other.m_latency_meters)),
+    m_boundary_channels(std::move(other.m_boundary_channels)),
+    m_is_configured(std::exchange(other.m_is_configured, false)),
+    m_config_channels_ids(std::move(other.m_config_channels_ids)),
+    m_hw_only_boundary_buffers(std::move(other.m_hw_only_boundary_buffers))
+{}
+
+// Copies the supported infer features from the core-op metadata into the
+// application header sent to the firmware.
+hailo_status ResourcesManager::fill_infer_features(CONTROL_PROTOCOL__application_header_t &app_header)
+{
+    app_header.infer_features.preliminary_run_asap = m_core_op_metadata->supported_features().preliminary_run_asap;
+    return HAILO_SUCCESS;
+}
+
+
+// Fills validation features in the application header. ABBALE support is
+// currently hard-coded off (see TODO below).
+hailo_status ResourcesManager::fill_validation_features(CONTROL_PROTOCOL__application_header_t &app_header)
+{
+    static const auto ABBALE_NOT_SUPPORTED = false;
+    // TODO: fix is_abbale_supported
+    // auto proto_message = hef.pimpl.proto_message();
+    // auto has_included_features = proto_message->has_included_features();
+    // if (has_included_features) {
+    //     is_abbale_supported = proto_message->included_features().abbale();
+    // }
+    app_header.validation_features.is_abbale_supported = ABBALE_NOT_SUPPORTED;
+    return HAILO_SUCCESS;
+}
+
+// Writes the per-network batch sizes into the application header, mapping each
+// configured network name to its index in m_network_index_map.
+hailo_status ResourcesManager::fill_network_batch_size(CONTROL_PROTOCOL__application_header_t &app_header)
+{
+    app_header.networks_count = static_cast<uint8_t>(m_config_params.network_params_by_name.size());
+    for (const auto &network_pair : m_config_params.network_params_by_name) {
+        auto network_name_from_params = network_pair.first;
+        uint8_t network_index = 0;
+        for (network_index = 0; network_index < m_network_index_map.size(); network_index++) {
+            auto const network_name_from_map = m_network_index_map[network_index];
+            if (network_name_from_map == network_name_from_params) {
+                auto batch_size = get_network_batch_size(network_name_from_params);
+                CHECK_EXPECTED_AS_STATUS(batch_size);
+                app_header.batch_size[network_index] = batch_size.value();
+                break;
+            }
+        }
+        // Loop ran to completion without a break => name not found in the map.
+        if (m_network_index_map.size() == network_index) {
+            LOGGER__ERROR("Failed to find network with network name {}", network_name_from_params);
+            return HAILO_NOT_FOUND;
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Reports the config-stream-manager buffer (descriptor page) size to the FW.
+hailo_status ResourcesManager::fill_csm_buffer_size(CONTROL_PROTOCOL__application_header_t &app_header)
+{
+    // All config buffers on the same platform will have the same desc_page_size - because it is derived from the host
+    app_header.csm_buffer_size = std::min(m_driver.desc_max_page_size(), vdma::DEFAULT_DESC_PAGE_SIZE);
+    return HAILO_SUCCESS;
+}
+
+// Interrupt-dispatcher callback: routes each per-channel irq record to its
+// boundary channel. Errors on one channel are logged and do not block the
+// processing of the remaining channels.
+void ResourcesManager::process_interrupts(IrqData &&irq_data)
+{
+    assert(irq_data.channels_count <= ARRAY_ENTRIES(irq_data.channels_irq_data));
+    for (uint8_t irq_index = 0; irq_index < irq_data.channels_count; irq_index++) {
+        const auto &channel_irq_data = irq_data.channels_irq_data[irq_index];
+        auto boundary_channel = m_boundary_channels.find(channel_irq_data.channel_id);
+        if (std::end(m_boundary_channels) == boundary_channel) {
+            LOGGER__ERROR("Got interrupt for channel {}, but there is no such boundary channel", channel_irq_data.channel_id);
+            continue;
+        }
+
+        if ((channel_irq_data.host_error != 0) || (channel_irq_data.device_error != 0)) {
+            LOGGER__CRITICAL("Got error on channel {} host_error=0x{:x} device_error=0x{:x}",
+                channel_irq_data.channel_id, channel_irq_data.host_error, channel_irq_data.device_error);
+            continue;
+        }
+
+        if (!channel_irq_data.is_active) {
+            LOGGER__CRITICAL("Channel {} was aborted by external source", channel_irq_data.channel_id);
+            continue;
+        }
+
+        auto status = boundary_channel->second->trigger_channel_completion(channel_irq_data.desc_num_processed);
+        // HAILO_STREAM_ABORTED_BY_USER / HAILO_STREAM_NOT_ACTIVATED are expected
+        // during deactivation and are not treated as failures.
+        if ((status != HAILO_SUCCESS) &&
+            (status != HAILO_STREAM_ABORTED_BY_USER) &&
+            (status != HAILO_STREAM_NOT_ACTIVATED)) {
+            // Log error and continue gracefully to process other interrupts
+            LOGGER__ERROR("Trigger channel completion failed on channel {} with status {}", channel_irq_data.channel_id, status);
+        }
+    }
+}
+
+// Allocates a channel id and creates the boundary vdma channel for the given
+// layer, sizing its descriptor list by the network's batch size. The channel is
+// stored in m_boundary_channels keyed by its channel id.
+hailo_status ResourcesManager::create_boundary_vdma_channel(const LayerInfo &layer_info)
+{
+    // TODO: put in layer info
+    const auto channel_direction = layer_info.direction == HAILO_H2D_STREAM ? HailoRTDriver::DmaDirection::H2D :
+                                                                              HailoRTDriver::DmaDirection::D2H;
+    const auto channel_id = get_available_channel_id(to_layer_identifier(layer_info),
+        channel_direction, layer_info.dma_engine_index);
+    CHECK_EXPECTED_AS_STATUS(channel_id);
+
+    auto network_batch_size = get_network_batch_size(layer_info.network_name);
+    CHECK_EXPECTED_AS_STATUS(network_batch_size);
+
+    uint32_t min_active_trans = MIN_ACTIVE_TRANSFERS_SCALE * network_batch_size.value();
+    uint32_t max_active_trans = MAX_ACTIVE_TRANSFERS_SCALE * network_batch_size.value();
+
+    CHECK(IS_FIT_IN_UINT16(min_active_trans), HAILO_INVALID_ARGUMENT,
+        "calculated min_active_trans for vdma descriptor list is out of UINT16 range");
+    // Fixed copy-paste: this check is on max_active_trans, but the original
+    // message reported "min_active_trans".
+    CHECK(IS_FIT_IN_UINT16(max_active_trans), HAILO_INVALID_ARGUMENT,
+        "calculated max_active_trans for vdma descriptor list is out of UINT16 range");
+
+    auto latency_meter = (contains(m_latency_meters, layer_info.network_name)) ? m_latency_meters.at(layer_info.network_name) : nullptr;
+
+    /* TODO - HRT-6829- page_size should be calculated inside the vDMA channel class create function */
+    const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
+        layer_info.nn_stream_config.core_buffers_per_frame);
+    auto desc_sizes_pair = vdma::DescriptorList::get_desc_buffer_sizes_for_single_transfer(m_driver,
+        static_cast<uint16_t>(min_active_trans), static_cast<uint16_t>(max_active_trans), transfer_size);
+    CHECK_EXPECTED_AS_STATUS(desc_sizes_pair);
+
+    const auto page_size = desc_sizes_pair->first;
+    // HW-only infer mode maximizes the descriptor count regardless of the computed value.
+    const auto descs_count = (nullptr != std::getenv("HAILO_CONFIGURE_FOR_HW_INFER")) ?
+        MAX_DESCS_COUNT : desc_sizes_pair->second;
+
+    const auto channel_type = (0 == (m_config_params.stream_params_by_name.at(layer_info.name).flags & HAILO_STREAM_FLAGS_ASYNC)) ?
+        vdma::BoundaryChannel::Type::BUFFERED : vdma::BoundaryChannel::Type::ASYNC;
+    auto channel = vdma::BoundaryChannel::create(channel_id.value(), channel_direction, m_driver, descs_count, page_size,
+        layer_info.name, latency_meter, network_batch_size.value(), channel_type);
+    CHECK_EXPECTED_AS_STATUS(channel);
+
+    m_boundary_channels.emplace(channel_id.value(), channel.release());
+    return HAILO_SUCCESS;
+}
+
+// Linear search over the boundary channels for the one serving stream_name.
+Expected<vdma::BoundaryChannelPtr> ResourcesManager::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
+{
+    for (const auto &boundary_channel : m_boundary_channels) {
+        if (boundary_channel.second->stream_name() == stream_name) {
+            return vdma::BoundaryChannelPtr(boundary_channel.second);
+        }
+    }
+
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Const overload of the lookup above, returning a pointer-to-const channel.
+Expected<std::shared_ptr<const vdma::BoundaryChannel>> ResourcesManager::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) const
+{
+    for (const auto &boundary_channel : m_boundary_channels) {
+        if (boundary_channel.second->stream_name() == stream_name) {
+            return std::shared_ptr<const vdma::BoundaryChannel>(boundary_channel.second);
+        }
+    }
+
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Power mode requested at configure time.
+hailo_power_mode_t ResourcesManager::get_power_mode() const
+{
+    return m_config_params.power_mode;
+}
+
+// Creates an inter-context buffer keyed by (src_context, src_stream) and sized
+// by the network's batch size. NOTE(review): if a buffer already exists for
+// the key, map::emplace keeps the existing one and the new buffer is dropped -
+// presumably keys are unique per core-op; confirm with callers.
+ExpectedRef<InterContextBuffer> ResourcesManager::create_inter_context_buffer(uint32_t transfer_size,
+    uint8_t src_stream_index, uint8_t src_context_index, const std::string &network_name, vdma::ChannelId d2h_channel_id)
+{
+    auto network_batch_size_exp = get_network_batch_size(network_name);
+    CHECK_EXPECTED(network_batch_size_exp);
+    auto network_batch_size = network_batch_size_exp.value();
+
+    auto buffer = InterContextBuffer::create(m_driver, transfer_size, network_batch_size, d2h_channel_id);
+    CHECK_EXPECTED(buffer);
+
+    const auto key = std::make_pair(src_context_index, src_stream_index);
+    auto emplace_res = m_inter_context_buffers.emplace(key, buffer.release());
+    return std::ref(emplace_res.first->second);
+}
+
+// Looks up a previously-created inter-context buffer by (context, stream) key.
+ExpectedRef<InterContextBuffer> ResourcesManager::get_inter_context_buffer(const IntermediateBufferKey &key)
+{
+    auto buffer_it = m_inter_context_buffers.find(key);
+    if (std::end(m_inter_context_buffers) == buffer_it) {
+        return make_unexpected(HAILO_NOT_FOUND);
+    }
+
+    return std::ref(buffer_it->second);
+}
+
+// Assembles the application header (context count, infer/validation features,
+// batch sizes, csm buffer size) that is sent to the firmware at configure time.
+Expected<CONTROL_PROTOCOL__application_header_t> ResourcesManager::get_control_core_op_header()
+{
+    CONTROL_PROTOCOL__application_header_t app_header{};
+    app_header.dynamic_contexts_count = m_dynamic_context_count;
+
+    auto status = fill_infer_features(app_header);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Invalid infer features");
+    status = fill_validation_features(app_header);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Invalid validation features");
+    status = fill_network_batch_size(app_header);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Invalid network batch sizes");
+    status = fill_csm_buffer_size(app_header);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Invalid csm buffer size");
+
+    return app_header;
+}
+
+// Appends a new context (activation/preliminary/dynamic) and returns a
+// reference to its resources; dynamic contexts are also counted separately
+// for the application header.
+Expected<std::reference_wrapper<ContextResources>> ResourcesManager::add_new_context(CONTROL_PROTOCOL__context_switch_context_type_t type,
+    const ConfigBufferInfoMap &config_info)
+{
+    CHECK_AS_EXPECTED(m_total_context_count < std::numeric_limits<uint8_t>::max(), HAILO_INVALID_CONTEXT_COUNT);
+
+    auto context_resources = ContextResources::create(m_driver, type, m_config_channels_ids, config_info);
+    CHECK_EXPECTED(context_resources);
+
+    m_contexts_resources.emplace_back(context_resources.release());
+    m_total_context_count++;
+    if (CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC == type) {
+        m_dynamic_context_count++;
+    }
+
+    return std::ref(m_contexts_resources.back());
+}
+
+// Delegates to the channel allocator, normalizing the engine index on PCIe.
+Expected<vdma::ChannelId> ResourcesManager::get_available_channel_id(const LayerIdentifier &layer_identifier,
+    HailoRTDriver::DmaDirection direction, uint8_t engine_index)
+{
+    if (m_driver.dma_type() == HailoRTDriver::DmaType::PCIE) {
+        // On PCIe we have only 1 engine. To support the same HEF with both PCIe and DRAM, we use default engine here
+        engine_index = vdma::DEFAULT_ENGINE_INDEX;
+    }
+
+    return m_channel_allocator.get_available_channel_id(layer_identifier, direction, engine_index);
+}
+
+// Releases the channel previously allocated for the given layer.
+hailo_status ResourcesManager::free_channel_index(const LayerIdentifier &layer_identifier)
+{
+    return m_channel_allocator.free_channel_index(layer_identifier);
+}
+
+// Forwards to the underlying vdma device.
+Expected<hailo_stream_interface_t> ResourcesManager::get_default_streams_interface()
+{
+    return m_vdma_device.get_default_streams_interface();
+}
+
+// Re-programs every inter-context buffer for the new dynamic batch size.
+hailo_status ResourcesManager::set_inter_context_channels_dynamic_batch_size(uint16_t dynamic_batch_size)
+{
+    for (auto &key_buff_pair : m_inter_context_buffers) {
+        const auto status = key_buff_pair.second.reprogram(dynamic_batch_size);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Resolves the effective batch size for a network: the configured value, with
+// HAILO_DEFAULT_BATCH_SIZE translated to DEFAULT_ACTUAL_BATCH_SIZE.
+Expected<uint16_t> ResourcesManager::get_network_batch_size(const std::string &network_name) const
+{
+    for (auto const &network_map : m_config_params.network_params_by_name) {
+        auto const network_name_from_params = network_map.first;
+        if (network_name_from_params == network_name) {
+            auto actual_batch_size = network_map.second.batch_size;
+            if (HAILO_DEFAULT_BATCH_SIZE == actual_batch_size) {
+                actual_batch_size = DEFAULT_ACTUAL_BATCH_SIZE;
+            }
+            return actual_batch_size;
+        }
+    }
+
+    LOGGER__ERROR("Failed to find network with network name {}", network_name);
+
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Reads an intermediate buffer for debugging: first tries the inter-context
+// buffers, then falls back to the DDR channel pairs of the dynamic context
+// derived from the key (key = {dynamic context index, d2h stream index}).
+Expected<Buffer> ResourcesManager::read_intermediate_buffer(const IntermediateBufferKey &key)
+{
+    auto inter_context_buffer_it = m_inter_context_buffers.find(key);
+    if (std::end(m_inter_context_buffers) != inter_context_buffer_it) {
+        return inter_context_buffer_it->second.read();
+    }
+
+    // Dynamic contexts are stored after the non-dynamic ones in m_contexts_resources.
+    const auto dynamic_context_index = key.first;
+    const size_t context_index = dynamic_context_index + CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS;
+    CHECK_AS_EXPECTED(context_index < m_contexts_resources.size(), HAILO_NOT_FOUND, "Context index {} out of range",
+        dynamic_context_index);
+    const auto d2h_stream_index = key.second;
+    if (auto ddr_channels_pair = m_contexts_resources[context_index].get_ddr_channels_pair(d2h_stream_index)) {
+        return ddr_channels_pair->get().read();
+    }
+
+    LOGGER__ERROR("Failed to find intermediate buffer for src_context {}, src_stream_index {}", key.first,
+        key.second);
+    return make_unexpected(HAILO_NOT_FOUND);
+
+}
+
+// Sends the core-op header and every context's control records to the firmware.
+// May only be called once per ResourcesManager instance.
+hailo_status ResourcesManager::configure()
+{
+    CHECK(!m_is_configured, HAILO_INTERNAL_FAILURE, "Can't configure the same core-op twice");
+    m_is_configured = true;
+
+    auto core_op_header = get_control_core_op_header();
+    CHECK_EXPECTED_AS_STATUS(core_op_header);
+
+    auto status = Control::context_switch_set_network_group_header(m_vdma_device, core_op_header.release());
+    CHECK_SUCCESS(status);
+
+    for (const auto &context : m_contexts_resources) {
+        status = Control::context_switch_set_context_info(m_vdma_device, context.get_controls());
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Activates this core-op on the device's context-switch state machine.
+hailo_status ResourcesManager::enable_state_machine(uint16_t dynamic_batch_size)
+{
+    return Control::enable_core_op(m_vdma_device, m_core_op_index, dynamic_batch_size);
+}
+
+// Resets the device's context-switch state machine; on integrated devices a
+// full reset also requires resetting the nn-core through the SCU.
+hailo_status ResourcesManager::reset_state_machine(bool keep_nn_config_during_reset)
+{
+    auto status = Control::reset_context_switch_state_machine(m_vdma_device, keep_nn_config_during_reset);
+    CHECK_SUCCESS(status);
+
+    if (!keep_nn_config_during_reset && (Device::Type::INTEGRATED == m_vdma_device.get_type())) {
+        // On core device, the nn_manager is not responsible to reset the nn-core so
+        // we use the SCU control for that.
+        status = m_vdma_device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Best-effort cancellation of pending transfers on all ASYNC boundary
+// channels; failures are logged but never abort the loop.
+hailo_status ResourcesManager::cancel_pending_async_transfers()
+{
+    for (const auto &boundary_channel : m_boundary_channels) {
+        if (boundary_channel.second->type() != vdma::BoundaryChannel::Type::ASYNC) {
+            continue;
+        }
+
+        // Best effort
+        const auto status = boundary_channel.second->cancel_pending_transfers();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed cancellation of pending transfers on async channel {}", boundary_channel.second->stream_name());
+        }
+    }
+    return HAILO_SUCCESS;
+}
+
+// Builds a per-engine bitmap of all boundary channels and starts the device's
+// interrupt dispatcher with process_interrupts() as the callback. Timestamp
+// measurement is enabled only when latency meters exist.
+hailo_status ResourcesManager::start_vdma_interrupts_dispatcher()
+{
+    auto interrupts_dispatcher = m_vdma_device.get_vdma_interrupts_dispatcher();
+    CHECK_EXPECTED_AS_STATUS(interrupts_dispatcher);
+
+    ChannelsBitmap channels_bitmap{};
+    for (const auto &boundary_channel : m_boundary_channels) {
+        const auto channel_id = boundary_channel.first;
+        channels_bitmap[channel_id.engine_index] |= (1 << channel_id.channel_index);
+    }
+
+    const bool enable_timestamp_measure = !m_latency_meters.empty();
+    return interrupts_dispatcher->get().start(channels_bitmap, enable_timestamp_measure, [this](IrqData &&irq_data){
+        process_interrupts(std::move(irq_data));
+    });
+}
+
+// Stops the device's vdma interrupt dispatcher.
+hailo_status ResourcesManager::stop_vdma_interrupts_dispatcher()
+{
+    auto interrupts_dispatcher = m_vdma_device.get_vdma_interrupts_dispatcher();
+    CHECK_EXPECTED_AS_STATUS(interrupts_dispatcher);
+    return interrupts_dispatcher->get().stop();
+}
+
+// Programs batch_count * dynamic_batch_size transfers back-to-back into the
+// descriptor list (non-circular), raising a device interrupt only on the last
+// transfer of each batch. Returns the total number of descriptors programmed.
+Expected<uint16_t> ResourcesManager::program_desc_for_hw_only_flow(std::shared_ptr<vdma::DescriptorList> desc_list,
+    const uint32_t single_transfer_size, const uint16_t dynamic_batch_size, const uint16_t batch_count)
+{
+    size_t acc_desc_offset = 0;
+    for (uint16_t batch_index = 0; batch_index < batch_count; batch_index++) {
+        for (uint16_t transfer_index = 0; transfer_index < dynamic_batch_size; transfer_index++) {
+            const auto last_desc_interrupts_domain = ((dynamic_batch_size - 1) == transfer_index) ?
+                vdma::InterruptsDomain::DEVICE : vdma::InterruptsDomain::NONE;
+            static const auto BUFFER_NOT_CIRCULAR = false;
+            auto desc_count_local = desc_list->program_last_descriptor(single_transfer_size,
+                last_desc_interrupts_domain, acc_desc_offset, BUFFER_NOT_CIRCULAR);
+            CHECK_EXPECTED(desc_count_local, "Failed to program descs for inter context channels. Given max_batch_size is too big.");
+            acc_desc_offset += desc_count_local.value();
+        }
+    }
+    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(acc_desc_offset), HAILO_INTERNAL_FAILURE,
+        "calculated acc_desc_offset for vdma descriptor list is out of UINT16 range");
+    return static_cast<uint16_t>(acc_desc_offset);
+}
+
+// Allocates and maps a DMA buffer big enough for the whole HW-only run
+// (dynamic_batch_size * batch_count frames), binds it to the channel's
+// descriptor list and programs the descriptors. Returns the channel id and
+// the number of descriptors programmed. The mapped buffer is kept alive in
+// m_hw_only_boundary_buffers for the duration of the run.
+Expected<std::pair<vdma::ChannelId, uint16_t>> ResourcesManager::create_mapped_buffer_for_hw_only_infer(
+    vdma::BoundaryChannelPtr boundary_channel_ptr, const hailo_vdma_buffer_direction_flags_t direction,
+    const uint32_t single_transfer_size, const uint16_t dynamic_batch_size, const uint16_t batch_count)
+{
+    auto total_frames_per_run = dynamic_batch_size * batch_count;
+    auto total_run_transfer_size = total_frames_per_run * single_transfer_size;
+
+    auto desc_list = boundary_channel_ptr->get_desc_list();
+    auto total_desc_count = desc_list->descriptors_in_buffer(total_run_transfer_size);
+
+    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(total_desc_count), HAILO_INVALID_ARGUMENT,
+        "calculated total_desc_count for vdma descriptor list is out of UINT16 range");
+
+    auto mapped_buffer_exp = DmaMappedBuffer::create(total_desc_count * desc_list->desc_page_size(), direction, m_vdma_device);
+    CHECK_EXPECTED(mapped_buffer_exp);
+
+    auto mapped_buffer = make_shared_nothrow<DmaMappedBuffer>(mapped_buffer_exp.release());
+    CHECK_NOT_NULL_AS_EXPECTED(mapped_buffer, HAILO_OUT_OF_HOST_MEMORY);
+    m_hw_only_boundary_buffers.push_back(mapped_buffer);
+
+    // Named constant - was a mutable local in the original; follow the file's
+    // "static const" convention (cf. BUFFER_NOT_CIRCULAR).
+    static const uint32_t STARTING_DESC = 0;
+    auto status = desc_list->configure_to_use_buffer(*mapped_buffer, boundary_channel_ptr->get_channel_id(), STARTING_DESC);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto desc_programed = program_desc_for_hw_only_flow(desc_list, single_transfer_size, dynamic_batch_size, batch_count);
+    CHECK_EXPECTED(desc_programed);
+
+    auto channel_info_pair = std::make_pair(boundary_channel_ptr->get_channel_id(), desc_programed.release());
+
+    return channel_info_pair;
+}
+
+// Appends one channel's (id, programmed descriptor count) entry to the HW-infer
+// channels info structure sent to the firmware.
+void ResourcesManager::add_channel_to_hw_infer_channel_info(std::pair<vdma::ChannelId, uint16_t> channel_info,
+    CONTROL_PROTOCOL__hw_infer_channels_info_t &channels_info)
+{
+    // Bounds-check BEFORE forming a pointer to the next slot (the original
+    // computed the pointer first and asserted afterwards). Also fixes the
+    // misspelled local name ("chnanel").
+    assert(channels_info.channel_count < CONTROL_PROTOCOL__MAX_TOTAL_CHANNEL_COUNT);
+    auto next_channel_info = &channels_info.channel_info[channels_info.channel_count];
+
+    next_channel_info->engine_index = channel_info.first.engine_index;
+    next_channel_info->channel_index = channel_info.first.channel_index;
+    next_channel_info->desc_programed = channel_info.second;
+
+    channels_info.channel_count++;
+}
+
+// Computes how many batches of dynamic_batch_size frames fit in every boundary
+// channel's descriptor list for a HW-only infer run.
+Expected<uint16_t> ResourcesManager::calc_hw_infer_batch_count(uint16_t dynamic_batch_size)
+{
+    uint16_t batch_count = UINT16_MAX;
+    for (const auto &layer_info : m_core_op_metadata->get_all_layer_infos()) {
+        const auto stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+        // For NMS outputs only a single bbox is transferred per descriptor batch.
+        const auto single_transfer_size = (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
+            stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
+        auto boundary_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(layer_info.name);
+        CHECK_EXPECTED(boundary_channel_ptr_exp);
+        auto boundary_channel_ptr = boundary_channel_ptr_exp.release();
+        const auto max_batch_transfers = boundary_channel_ptr->get_desc_list()->max_transfers(single_transfer_size * dynamic_batch_size);
+        // The infer batch count is the minimum "max transfers" over all boundary channels.
+        batch_count = MIN(batch_count, max_batch_transfers);
+    }
+    return batch_count;
+}
+
+// Computes and logs throughput statistics (fps, bandwidth) for a HW-only infer
+// run from the cycle count reported by the firmware.
+// NOTE(review): LOGGER__ERROR is presumably used so the stats always print
+// regardless of log level - confirm this is intentional.
+void ResourcesManager::hw_infer_calc_stats(uint16_t batch_count, uint16_t dynamic_batch_size,
+    size_t single_frame_transfer_size, uint32_t infer_cycles)
+{
+    const auto total_transfer_size = single_frame_transfer_size * dynamic_batch_size * batch_count;
+    const auto total_frames = dynamic_batch_size * batch_count;
+
+    // TODO - get clock rate from Chip (still not supported in VPU mode)
+    // Hard-coded: seconds per cycle assuming a 200MHz clock (5ns per cycle).
+    const float32_t CPU_CLOCK_RATE = static_cast<float32_t>(5.0 / (1000 * 1000 * 1000));
+    const float32_t time_sec = static_cast<float32_t>(infer_cycles) * CPU_CLOCK_RATE;
+    const float32_t fps = static_cast<float32_t>(total_frames) / time_sec;
+    const float32_t BYTE_TO_BIT = 8.0;
+    const float32_t BITS_TO_GBIT = static_cast<float32_t>(1.0 * 1000 * 1000 * 1000);
+    const float32_t BW_Gbps = static_cast<float32_t>(total_transfer_size) * BYTE_TO_BIT / time_sec / BITS_TO_GBIT;
+    LOGGER__ERROR("\nBatch count - {}\nTotal transfer size: {}\ntotal_frames - {}\ntime_sec - {}\nfps - {}\nBW_Gbps - {}",
+        batch_count, total_transfer_size, total_frames, time_sec, fps, BW_Gbps);
+}
+
+// Runs an inference entirely in hardware (no host data path): maps a buffer
+// per boundary channel, programs descriptors for the whole run, starts the FW
+// infer, waits a fixed delay, then stops it and logs throughput stats.
+Expected<CONTROL_PROTOCOL__hw_only_infer_results_t> ResourcesManager::run_hw_only_infer(uint16_t dynamic_batch_size)
+{
+    CONTROL_PROTOCOL__hw_only_infer_results_t infer_results = {};
+    CONTROL_PROTOCOL__hw_infer_channels_info_t channels_info = {};
+    channels_info.channel_count = 0;
+
+    CHECK_AS_EXPECTED(dynamic_batch_size <= m_config_params.batch_size, HAILO_INVALID_ARGUMENT,
+        "Dynamic batch size must be up to configured batch size");
+
+    auto batch_count = calc_hw_infer_batch_count(dynamic_batch_size);
+    CHECK_EXPECTED(batch_count);
+
+    for (const auto &layer_info : m_core_op_metadata->get_all_layer_infos()) {
+        auto boundary_channel_ptr = get_boundary_vdma_channel_by_stream_name(layer_info.name);
+        CHECK_EXPECTED(boundary_channel_ptr);
+        auto stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+        auto single_transfer_size = (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
+            stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
+        const auto direction = (layer_info.direction == HAILO_H2D_STREAM) ?
+            HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D : HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H;
+
+        auto channel_info_pair = create_mapped_buffer_for_hw_only_infer(boundary_channel_ptr.release(), direction,
+            single_transfer_size, dynamic_batch_size, batch_count.value());
+        CHECK_EXPECTED(channel_info_pair);
+
+        add_channel_to_hw_infer_channel_info(channel_info_pair.release(), channels_info);
+    }
+
+    auto status = Control::start_hw_only_infer(m_vdma_device, m_core_op_index, dynamic_batch_size, &channels_info);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Delay until infer ends
+    // TODO HRT-9829 - change to notification from FW
+    std::this_thread::sleep_for(std::chrono::milliseconds(20000));
+
+    status = Control::stop_hw_only_infer(m_vdma_device, &infer_results);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto single_frame_transfer_size = m_core_op_metadata->get_total_transfer_size();
+    CHECK_EXPECTED(single_frame_transfer_size);
+
+    hw_infer_calc_stats(batch_count.value(), dynamic_batch_size, single_frame_transfer_size.release(), infer_results.infer_cycles);
+
+    return infer_results;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file resource_manager.hpp
+ * @brief Manager for vdma-config core-op resources, for a specific physical device
+ *
+ * ResourceManager is used in 2 possible flows, with the following dependencies:
+ *
+ * !-Working with physical device-!
+ * VdmaDevice (either PcieDevice or IntegratedDevice)
+ * |--vector of VdmaConfigCoreOp
+ * |--ResourceManager <only one>
+ * |--reference to physical device
+ *
+ * !-Working with virtual device-!
+ * VDevice
+ * |--vector of VdmaDevice (either PcieDevice or IntegratedDevice)
+ * |--vector of VDeviceCoreOp
+ * |-- vector of VdmaConfigCoreOp <one per phys device>
+ * |--ResourceManager <only one>
+ * |--reference to physical device
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_
+#define _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_
+
+#include "hailo/hailort.h"
+
+#include "core_op/resource_manager/inter_context_buffer.hpp"
+#include "core_op/resource_manager/ddr_channels_pair.hpp"
+#include "core_op/resource_manager/config_buffer.hpp"
+#include "core_op/resource_manager/channel_allocator.hpp"
+#include "core_op/resource_manager/context_switch_buffer_builder.hpp"
+#include "device_common/control_protocol.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+#include "vdma/pcie/pcie_device.hpp"
+
+
+namespace hailort
+{
+
+#define DEFAULT_ACTUAL_BATCH_SIZE (1)
+
+
+// An edge layer (boundary / inter-context / ddr) that was bound to a vDMA channel
+// within a context, together with the host buffer description backing it.
+struct EdgeLayer {
+ LayerInfo layer_info;
+ vdma::ChannelId channel_id;
+ CONTROL_PROTOCOL__host_buffer_info_t buffer_info;
+};
+
+// Per-context resources of a core-op: config buffers, DDR channel pairs and the edge
+// layers registered for the context, plus the builder accumulating the context-switch
+// controls that are later sent to the FW.
+class ContextResources final {
+public:
+ static Expected<ContextResources> create(HailoRTDriver &driver, CONTROL_PROTOCOL__context_switch_context_type_t context_type,
+ const std::vector<vdma::ChannelId> &config_channels_ids, const ConfigBufferInfoMap &config_buffer_infos);
+
+ const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &get_controls() const;
+ ContextSwitchBufferBuilder &builder();
+
+ // Registers an edge layer that was bound to the given channel and host buffer.
+ void add_edge_layer(const LayerInfo &layer_info, vdma::ChannelId channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &buffer_info);
+
+ // Overloads filter the registered edge layers by type and/or direction.
+ std::vector<EdgeLayer> get_edge_layers() const;
+ std::vector<EdgeLayer> get_edge_layers(LayerType layer_type) const;
+ std::vector<EdgeLayer> get_edge_layers(hailo_stream_direction_t direction) const;
+ std::vector<EdgeLayer> get_edge_layers(LayerType layer_type, hailo_stream_direction_t direction) const;
+
+ Expected<EdgeLayer> get_edge_layer_by_stream_index(uint8_t stream_index) const;
+
+ ExpectedRef<DdrChannelsPair> create_ddr_channels_pair(const DdrChannelsInfo &ddr_info);
+ ExpectedRef<const DdrChannelsPair> get_ddr_channels_pair(uint8_t d2h_stream_index) const;
+ const std::vector<DdrChannelsPair> &get_ddr_channels_pairs() const;
+
+ hailo_status validate_edge_layers();
+
+ std::vector<ConfigBuffer> &get_config_buffers();
+
+private:
+ ContextResources(HailoRTDriver &driver, CONTROL_PROTOCOL__context_switch_context_type_t context_type,
+ std::vector<ConfigBuffer> &&config_buffers) :
+ m_driver(std::ref(driver)),
+ m_builder(context_type),
+ m_config_buffers(std::move(config_buffers))
+ {}
+
+ std::reference_wrapper<HailoRTDriver> m_driver;
+ ContextSwitchBufferBuilder m_builder;
+ std::vector<ConfigBuffer> m_config_buffers;
+ std::vector<DdrChannelsPair> m_ddr_channels_pairs;
+
+ std::vector<EdgeLayer> m_edge_layers;
+};
+
+// Owns all per-physical-device resources of a configured core-op: vDMA channels
+// (boundary / inter-context / config), intermediate buffers, latency meters and the
+// context-switch state, and drives the FW state machine for that core-op.
+class ResourcesManager final
+{
+public:
+ static Expected<ResourcesManager> create(VdmaDevice &vdma_device, HailoRTDriver &driver,
+ const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> core_op_metadata,
+ uint8_t core_op_index);
+
+ // TODO: HRT-9432 needs to call stop_vdma_interrupts_dispatcher and any other resource on dtor.
+ ~ResourcesManager() = default;
+ // Non-copyable; movable only via the move constructor (move-assign is deleted).
+ ResourcesManager(const ResourcesManager &other) = delete;
+ ResourcesManager &operator=(const ResourcesManager &other) = delete;
+ ResourcesManager &operator=(ResourcesManager &&other) = delete;
+ ResourcesManager(ResourcesManager &&other) noexcept;
+
+ ExpectedRef<InterContextBuffer> create_inter_context_buffer(uint32_t transfer_size, uint8_t src_stream_index,
+ uint8_t src_context_index, const std::string &network_name, vdma::ChannelId d2h_channel_id);
+ ExpectedRef<InterContextBuffer> get_inter_context_buffer(const IntermediateBufferKey &key);
+ hailo_status create_boundary_vdma_channel(const LayerInfo &layer_info);
+
+ Expected<CONTROL_PROTOCOL__application_header_t> get_control_core_op_header();
+
+ Expected<std::reference_wrapper<ContextResources>> add_new_context(CONTROL_PROTOCOL__context_switch_context_type_t type,
+ const ConfigBufferInfoMap &config_info={});
+
+ const SupportedFeatures &get_supported_features() const
+ {
+ return m_core_op_metadata->supported_features();
+ }
+
+ VdmaDevice &get_device()
+ {
+ return m_vdma_device;
+ }
+
+ Expected<vdma::ChannelId> get_available_channel_id(const LayerIdentifier &layer_identifier,
+ HailoRTDriver::DmaDirection direction, uint8_t engine_index);
+ hailo_status free_channel_index(const LayerIdentifier &layer_identifier);
+
+ const char* get_dev_id() const
+ {
+ return m_vdma_device.get_dev_id();
+ }
+
+ LatencyMetersMap &get_latency_meters()
+ {
+ return m_latency_meters;
+ }
+
+ // Note: returns the map by value (a copy of the shared_ptr map).
+ std::map<vdma::ChannelId, vdma::BoundaryChannelPtr> get_boundary_vdma_channels() const
+ {
+ return m_boundary_channels;
+ }
+
+ Expected<hailo_stream_interface_t> get_default_streams_interface();
+
+ Expected<Buffer> read_intermediate_buffer(const IntermediateBufferKey &key);
+
+ hailo_status set_inter_context_channels_dynamic_batch_size(uint16_t dynamic_batch_size);
+ hailo_status configure();
+ hailo_status enable_state_machine(uint16_t dynamic_batch_size);
+ hailo_status reset_state_machine(bool keep_nn_config_during_reset = false);
+ hailo_status cancel_pending_async_transfers();
+ hailo_status start_vdma_interrupts_dispatcher();
+ hailo_status stop_vdma_interrupts_dispatcher();
+ Expected<uint16_t> get_network_batch_size(const std::string &network_name) const;
+ Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name);
+ Expected<std::shared_ptr<const vdma::BoundaryChannel>> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) const;
+ hailo_power_mode_t get_power_mode() const;
+ // The following group implements the hw-only inference flow (no host stream objects).
+ Expected<uint16_t> program_desc_for_hw_only_flow(std::shared_ptr<vdma::DescriptorList> desc_list,
+ const uint32_t single_transfer_size, const uint16_t dynamic_batch_size, const uint16_t batch_count);
+ Expected<std::pair<vdma::ChannelId, uint16_t>> create_mapped_buffer_for_hw_only_infer(
+ vdma::BoundaryChannelPtr boundary_channel_ptr, const hailo_vdma_buffer_direction_flags_t direction,
+ const uint32_t single_transfer_size, const uint16_t dynamic_batch_size, const uint16_t batch_count);
+ void add_channel_to_hw_infer_channel_info(std::pair<vdma::ChannelId, uint16_t> channel_info,
+ CONTROL_PROTOCOL__hw_infer_channels_info_t &channels_info);
+ Expected<uint16_t> calc_hw_infer_batch_count(uint16_t dynamic_batch_size);
+ void hw_infer_calc_stats(uint16_t batch_count, uint16_t dynamic_batch_size,
+ size_t single_frame_transfer_size, uint32_t infer_cycles);
+ Expected<CONTROL_PROTOCOL__hw_only_infer_results_t> run_hw_only_infer(uint16_t dynamic_batch_size);
+
+private:
+ hailo_status fill_infer_features(CONTROL_PROTOCOL__application_header_t &app_header);
+ hailo_status fill_validation_features(CONTROL_PROTOCOL__application_header_t &app_header);
+ hailo_status fill_network_batch_size(CONTROL_PROTOCOL__application_header_t &app_header);
+ hailo_status fill_csm_buffer_size(CONTROL_PROTOCOL__application_header_t &app_header);
+ void process_interrupts(IrqData &&irq_data);
+
+ std::vector<ContextResources> m_contexts_resources;
+ ChannelAllocator m_channel_allocator;
+ VdmaDevice &m_vdma_device;
+ HailoRTDriver &m_driver;
+ const ConfigureNetworkParams m_config_params;
+ std::map<IntermediateBufferKey, InterContextBuffer> m_inter_context_buffers;
+ std::shared_ptr<CoreOpMetadata> m_core_op_metadata;
+ uint8_t m_core_op_index;
+ uint8_t m_dynamic_context_count;
+ uint8_t m_total_context_count;
+ const std::vector<std::string> m_network_index_map;
+ LatencyMetersMap m_latency_meters; // Latency meter per network
+ // TODO: HRT-9429 - fast access to channel by id, using array, using engine_index and channel_index.
+ std::map<vdma::ChannelId, vdma::BoundaryChannelPtr> m_boundary_channels;
+ bool m_is_configured;
+ // Config channels ids are shared between all context. The following vector contains the channel id for each
+ // config_stream_index.
+ std::vector<vdma::ChannelId> m_config_channels_ids;
+ // Mapped buffers would be used only in hw only flow
+ std::vector<std::shared_ptr<DmaMappedBuffer>> m_hw_only_boundary_buffers;
+
+ ResourcesManager(VdmaDevice &vdma_device, HailoRTDriver &driver,
+ ChannelAllocator &&channel_allocator, const ConfigureNetworkParams config_params,
+ std::shared_ptr<CoreOpMetadata> &&core_op_metadata, uint8_t core_op_index,
+ const std::vector<std::string> &&network_index_map, LatencyMetersMap &&latency_meters,
+ std::vector<vdma::ChannelId> &&config_channels_ids);
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_RESOURCE_MANAGER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file resource_manager_builder.cpp
+ * @brief Builds a ResourcesManager object for the given CoreOp.
+ **/
+
+#include "resource_manager_builder.hpp"
+#include "device_common/control.hpp"
+
+
+namespace hailort
+{
+
+
+// Chooses the number of periph buffers per frame: scans counts starting at
+// 'min_periph_buffers_per_frame' for one that splits the frame into FIFO-word-granular
+// buffers that also fit in max_periph_bytes_per_buffer; falls back when none fits.
+static uint16_t calculate_periph_buffers_per_frame(const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
+ uint16_t min_periph_buffers_per_frame, uint32_t frame_size, uint16_t periph_buffers_per_frame)
+{
+ const auto max_periph_buffers_per_frame = MIN(frame_size, static_cast<uint32_t>(hw_consts.max_periph_buffers_per_frame));
+ // Fifo copies FIFO_WORD_GRANULARITY_IN_BYTES each time from/to the fifo
+ const uint32_t frame_size_words_count = frame_size / hw_consts.fifo_word_granularity_bytes;
+ // Look for the highest periph_bytes_per_buffer (frame_size / periph_buffers_per_frame) that is a multiple of FIFO_WORD_GRANULARITY_IN_BYTES
+ for (uint16_t i = min_periph_buffers_per_frame; i < max_periph_buffers_per_frame; i++) {
+ // (0 == (frame_size_words_count % i) ensures periph_bytes_per_buffer will be a multiple of FIFO_WORD_GRANULARITY_IN_BYTES
+ if ((0 == (frame_size_words_count % i)) && (hw_consts.max_periph_bytes_per_buffer >= (frame_size / i))) {
+ return i;
+ }
+ }
+
+ // Fallback to frame_size unless it exceeds MAX_PERIPH_BUFFERS_PER_FRAME
+ // NOTE(review): returning frame_size as the buffer *count* implies one byte per
+ // buffer as a last resort - looks intentional, but worth confirming against FW expectations.
+ if (hw_consts.max_periph_buffers_per_frame < frame_size) {
+ return periph_buffers_per_frame;
+ } else {
+ return static_cast<uint16_t>(frame_size);
+ }
+}
+
+// Adjusts periph credit parameters (bytes per buffer / buffers per frame) in place.
+// When credit optimization is enabled, re-splits the frame into the fewest, largest
+// FIFO-word-granular buffers; always validates granularity and the D2H FIFO headroom.
+// @return HAILO_SUCCESS, or HAILO_INTERNAL_FAILURE on granularity/headroom violation.
+static hailo_status calculate_credit_params(const CONTROL_PROTOCOL__hw_consts_t &hw_consts, uint16_t desc_page_size,
+ hailo_stream_direction_t direction, bool should_optimize_credits, uint16_t *periph_bytes_per_buffer,
+ uint16_t *periph_buffers_per_frame)
+{
+ // Next parameters differ between RX and TX
+
+ auto local_periph_bytes_per_buffer = (*periph_bytes_per_buffer);
+ auto local_periph_buffers_per_frame = (*periph_buffers_per_frame);
+ uint32_t periph_frame_size = (*periph_bytes_per_buffer) * (*periph_buffers_per_frame);
+ // NOTE(review): MAX (not MIN) means an incoming value already above
+ // max_acceptable_bytes_per_buffer is kept as the bound - confirm this is intended.
+ const auto max_bytes_per_buffer = MAX(hw_consts.max_acceptable_bytes_per_buffer, (*periph_bytes_per_buffer));
+
+ if (0 != (local_periph_bytes_per_buffer % hw_consts.fifo_word_granularity_bytes)) {
+ return HAILO_INTERNAL_FAILURE;
+ }
+
+ if (should_optimize_credits) {
+ // If credits optimizations flag is on, assuming periph_buffers_per_frame * periph_bytes_per_buffer == periph_frame_size
+ // Find the lowest periph_buffers_per_frame that divides periph_frame_size and is bigger than periph_frame_size / max_bytes_per_buffer
+ // Also, periph_bytes_per_buffer must be a multiple of 8
+ const auto min_periph_buffers_per_frame = DIV_ROUND_UP(periph_frame_size, max_bytes_per_buffer);
+ local_periph_buffers_per_frame = calculate_periph_buffers_per_frame(hw_consts, static_cast<uint16_t>(min_periph_buffers_per_frame),
+ periph_frame_size, local_periph_buffers_per_frame);
+ assert(IS_FIT_IN_UINT16(periph_frame_size / local_periph_buffers_per_frame));
+ local_periph_bytes_per_buffer = static_cast<uint16_t>(periph_frame_size / local_periph_buffers_per_frame); // Must be integer according to last function
+ }
+ // Periph credits size must be lower than the following value to make sure that the credit size allows
+ // for at least desc_page_size bytes left in the FIFO for the last descriptor in the pattern
+ if ((direction == HAILO_D2H_STREAM) &&
+ (static_cast<uint32_t>(local_periph_bytes_per_buffer) > (hw_consts.outbound_data_stream_size - 8 - desc_page_size))) {
+ LOGGER__ERROR("Current periph_bytes_per_buffer is {} which is too high. Exiting.", local_periph_bytes_per_buffer);
+ return HAILO_INTERNAL_FAILURE;
+ }
+
+ *periph_bytes_per_buffer = local_periph_bytes_per_buffer;
+ *periph_buffers_per_frame = local_periph_buffers_per_frame;
+ return HAILO_SUCCESS;
+}
+
+// Returns a copy of 'original_layer_info' whose periph credit parameters have been
+// adjusted to the given host buffer, and whose initial credit size receives the
+// hardware default when the HEF left it unset (zero).
+static Expected<LayerInfo> update_layer_info(const LayerInfo &original_layer_info,
+ const CONTROL_PROTOCOL__host_buffer_info_t &buffer_info,
+ const CONTROL_PROTOCOL__hw_consts_t &hw_consts, bool should_optimize_credits)
+{
+ LayerInfo updated_info = original_layer_info;
+
+ const auto status = calculate_credit_params(hw_consts, buffer_info.desc_page_size, updated_info.direction,
+ should_optimize_credits, &updated_info.nn_stream_config.periph_bytes_per_buffer,
+ &updated_info.nn_stream_config.periph_buffers_per_frame);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ if (0 == updated_info.max_shmifo_size) {
+ updated_info.max_shmifo_size = hw_consts.default_initial_credit_size;
+ }
+
+ return updated_info;
+}
+
+// Binds a boundary (host-facing) input layer to its boundary vDMA channel and registers
+// it as an edge layer of the given context.
+// Fix: 'layer_info' is now taken by const reference - every sibling fill_*_layer helper
+// already does so; taking it by value silently copied a non-trivial LayerInfo per call.
+static hailo_status fill_boundary_input_layer(ContextResources &context_resources,
+ ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
+ bool should_optimize_credits)
+{
+ const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
+ layer_info.nn_stream_config.core_buffers_per_frame);
+
+ auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
+ CHECK_EXPECTED_AS_STATUS(vdma_channel);
+
+ const auto buffer_info = vdma_channel.value()->get_boundary_buffer_info(transfer_size);
+ auto local_layer_info = update_layer_info(layer_info, buffer_info, hw_consts, should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ const auto channel_id = vdma_channel.value()->get_channel_id();
+ context_resources.add_edge_layer(local_layer_info.value(), channel_id, buffer_info);
+
+ LOGGER__DEBUG("Boundary input stream: {} h2d_channel: {}.", layer_info.stream_index, channel_id);
+ return HAILO_SUCCESS;
+}
+
+// Binds an inter-context input layer: allocates an H2D channel and attaches the layer
+// to the inter-context buffer previously created by the producing (source) context.
+static hailo_status fill_inter_context_input_layer(ContextResources &context_resources,
+ ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
+ bool should_optimize_credits)
+{
+ const auto channel_id = resources_manager.get_available_channel_id(to_layer_identifier(layer_info),
+ HailoRTDriver::DmaDirection::H2D, layer_info.dma_engine_index);
+ CHECK_EXPECTED_AS_STATUS(channel_id);
+
+ /* Get inter context buffer previously created */
+ const auto &connected_context = layer_info.connected_context_info;
+ auto intermediate_buffer_key = std::make_pair(connected_context.context_index, connected_context.stream_index);
+ auto inter_context_buffer_exp = resources_manager.get_inter_context_buffer(intermediate_buffer_key);
+ CHECK_EXPECTED_AS_STATUS(inter_context_buffer_exp, "Failed to find inter context buffer for src context {}, src_stream_index {}",
+ connected_context.context_index, connected_context.stream_index);
+ auto &inter_context_buffer = inter_context_buffer_exp->get();
+
+ auto local_layer_info = update_layer_info(layer_info, inter_context_buffer.get_host_buffer_info(), hw_consts,
+ should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ context_resources.add_edge_layer(local_layer_info.value(), channel_id.value(),
+ inter_context_buffer.get_host_buffer_info());
+
+ LOGGER__DEBUG("Intermediate input stream {}, src_context:{}, dst_context: {}, h2d_channel {}.",
+ layer_info.stream_index, layer_info.context_index, layer_info.connected_context_info.context_index,
+ channel_id.value());
+
+ return HAILO_SUCCESS;
+}
+
+// Binds a boundary (host-facing) output layer to its boundary vDMA channel and
+// registers it as an edge layer of the given context (D2H mirror of
+// fill_boundary_input_layer).
+static hailo_status fill_boundary_output_layer(ContextResources &context_resources,
+ ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
+ bool should_optimize_credits)
+{
+ const auto transfer_size = (layer_info.nn_stream_config.periph_bytes_per_buffer *
+ layer_info.nn_stream_config.core_buffers_per_frame);
+
+ auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
+ CHECK_EXPECTED_AS_STATUS(vdma_channel);
+
+ const auto buffer_info = vdma_channel.value()->get_boundary_buffer_info(transfer_size);
+ auto local_layer_info = update_layer_info(layer_info, buffer_info, hw_consts, should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ const auto channel_id = vdma_channel.value()->get_channel_id();
+ context_resources.add_edge_layer(local_layer_info.value(), channel_id, buffer_info);
+
+ LOGGER__DEBUG("Boundary output stream: {} d2h_channel: {}.", layer_info.stream_index, channel_id);
+ return HAILO_SUCCESS;
+}
+
+// Binds an inter-context output layer: allocates a D2H channel, creates the
+// inter-context buffer the consuming context will later look up, and registers the
+// layer as an edge layer of the given context.
+static hailo_status fill_inter_context_output_layer(ContextResources &context_resources,
+ ResourcesManager &resources_manager, const LayerInfo &layer_info,
+ const CONTROL_PROTOCOL__hw_consts_t &hw_consts, bool should_optimize_credits)
+{
+ const auto channel_id = resources_manager.get_available_channel_id(to_layer_identifier(layer_info),
+ HailoRTDriver::DmaDirection::D2H, layer_info.dma_engine_index);
+ CHECK_EXPECTED_AS_STATUS(channel_id);
+
+ const auto frame_credits_in_bytes = (layer_info.nn_stream_config.periph_bytes_per_buffer *
+ layer_info.nn_stream_config.core_buffers_per_frame);
+
+ auto inter_context_buffer_exp = resources_manager.create_inter_context_buffer(frame_credits_in_bytes,
+ layer_info.stream_index, layer_info.context_index, layer_info.network_name, channel_id.value());
+ CHECK_EXPECTED_AS_STATUS(inter_context_buffer_exp);
+ auto &inter_context_buffer = inter_context_buffer_exp->get();
+
+ auto local_layer_info = update_layer_info(layer_info, inter_context_buffer.get_host_buffer_info(), hw_consts,
+ should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ context_resources.add_edge_layer(local_layer_info.value(), channel_id.value(),
+ inter_context_buffer.get_host_buffer_info());
+
+ LOGGER__DEBUG("Inter-context output stream {}, src_context:{}, d2h_channel {}.",
+ layer_info.stream_index, layer_info.context_index, channel_id.value());
+ return HAILO_SUCCESS;
+}
+
+// Binds a DDR (chip-to-chip) output layer: allocates both the H2D and D2H channels of
+// the DDR pair, creates the DDR buffer, and registers the layer on the D2H channel.
+// Must run before fill_ddr_input_layer for the same pair (outputs are parsed first).
+static hailo_status fill_ddr_output_layer(ContextResources &context_resources,
+ ResourcesManager &resources_manager, const LayerInfo &layer_info,
+ const CONTROL_PROTOCOL__hw_consts_t &hw_consts)
+{
+ CHECK(resources_manager.get_supported_features().padded_ddr_buffers, HAILO_INVALID_HEF,
+ "Failed opening non-compatible HEF that uses the following deprecated features: host-managed DDR buffers."
+ "Please re-compile the HEF using a newer Dataflow Compiler version (v3.11.0 or newer)");
+ // Allocate resources and prepare ddr_info
+
+ DdrChannelsInfo ddr_pair_info = {};
+ ddr_pair_info.h2d_stream_index = layer_info.connected_context_info.stream_index;
+ ddr_pair_info.d2h_stream_index = layer_info.stream_index;
+ ddr_pair_info.network_index = layer_info.network_index;
+
+ // It is assumed that output channels are parsed before input channels.
+ // Allocate vdma channel index for both edges
+ const auto h2d_layer_identifier = std::make_tuple(LayerType::DDR, layer_info.name, ddr_pair_info.h2d_stream_index);
+ const auto h2d_channel_id = resources_manager.get_available_channel_id(h2d_layer_identifier,
+ HailoRTDriver::DmaDirection::H2D, layer_info.connected_context_info.dma_engine_index);
+ CHECK_EXPECTED_AS_STATUS(h2d_channel_id);
+ ddr_pair_info.h2d_channel_id = h2d_channel_id.value();
+
+ const auto d2h_layer_identifier = std::make_tuple(LayerType::DDR, layer_info.name, ddr_pair_info.d2h_stream_index);
+ const auto d2h_channel_id = resources_manager.get_available_channel_id(d2h_layer_identifier,
+ HailoRTDriver::DmaDirection::D2H, layer_info.dma_engine_index);
+ CHECK_EXPECTED_AS_STATUS(d2h_channel_id);
+ ddr_pair_info.d2h_channel_id = d2h_channel_id.value();
+
+ ddr_pair_info.row_size = layer_info.nn_stream_config.core_bytes_per_buffer;
+ ddr_pair_info.min_buffered_rows = layer_info.ddr_info.min_buffered_rows;
+ ddr_pair_info.total_buffers_per_frame = layer_info.ddr_info.total_buffers_per_frame;
+
+ // Create the ddr buffer
+ auto ddr_channels_pair = context_resources.create_ddr_channels_pair(ddr_pair_info);
+ CHECK_EXPECTED_AS_STATUS(ddr_channels_pair);
+
+ // On ddr layers, we assume the periph credit size is aligned to the size of descriptor, so we don't want to
+ // optimize the credits.
+ const bool should_optimize_credits = false;
+ auto local_layer_info = update_layer_info(layer_info, ddr_channels_pair->get().get_host_buffer_info(), hw_consts,
+ should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ context_resources.add_edge_layer(local_layer_info.value(), ddr_pair_info.d2h_channel_id,
+ ddr_channels_pair->get().get_host_buffer_info());
+
+ return HAILO_SUCCESS;
+}
+
+// Binds a DDR (chip-to-chip) input layer to the H2D side of the DDR pair that the
+// matching output layer already created, validating that the pair's stream/network
+// indices agree with this layer.
+// Fix: corrected the garbled error message ("as not found" -> "was not found").
+static hailo_status fill_ddr_input_layer(ContextResources &context_resources,
+ const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts)
+{
+ auto connected_stream_index = layer_info.connected_context_info.stream_index;
+ auto ddr_channels_pair = context_resources.get_ddr_channels_pair(connected_stream_index);
+ CHECK(ddr_channels_pair, HAILO_INVALID_HEF, "Matching DDR layer was not found for context {} src stream {}",
+ layer_info.context_index, connected_stream_index);
+
+ const auto ddr_info = ddr_channels_pair->get().info();
+ LOGGER__DEBUG("DDR layer: input stream_index: {}, output stream_index: {}, h2d_channel {}, d2h_channel: {}.",
+ ddr_info.h2d_stream_index, ddr_info.d2h_stream_index, ddr_info.h2d_channel_id, ddr_info.d2h_channel_id);
+
+ CHECK(layer_info.stream_index == ddr_info.h2d_stream_index, HAILO_INVALID_HEF, "DDR channel pair mismatch in h2d channel");
+ CHECK(layer_info.connected_context_info.stream_index == ddr_info.d2h_stream_index, HAILO_INVALID_HEF, "DDR channel pair mismatch in d2h channel");
+ CHECK(layer_info.network_index == ddr_info.network_index, HAILO_INVALID_HEF, "DDR channel pair mismatch network_index");
+
+ // On ddr layers, we assume the periph credit size is aligned to the size of descriptor, so we don't want to
+ // optimize the credits.
+ const bool should_optimize_credits = false;
+ auto local_layer_info = update_layer_info(layer_info, ddr_channels_pair->get().get_host_buffer_info(), hw_consts,
+ should_optimize_credits);
+ CHECK_EXPECTED_AS_STATUS(local_layer_info);
+
+ context_resources.add_edge_layer(local_layer_info.value(), ddr_channels_pair->get().info().h2d_channel_id,
+ ddr_channels_pair->get().get_host_buffer_info());
+
+ return HAILO_SUCCESS;
+}
+
+// Appends a DdrPairInfoAction for every DDR channels pair that requires manual credit
+// management, and schedules the FW DDR-buffering task when at least one such pair exists.
+static hailo_status add_ddr_buffers_info(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
+ const ContextResources &context_resources)
+{
+ bool manual_credit_pair_found = false;
+ for (const auto &pair : context_resources.get_ddr_channels_pairs()) {
+ if (!pair.need_manual_credit_management()) {
+ continue;
+ }
+ const auto pair_info = pair.info();
+ auto ddr_pair_action = DdrPairInfoAction::create(pair_info.h2d_channel_id, pair_info.d2h_channel_id,
+ pair_info.network_index, pair.descriptors_per_frame(), pair.descs_count());
+ CHECK_EXPECTED_AS_STATUS(ddr_pair_action);
+ configuration_actions.emplace_back(ddr_pair_action.release());
+
+ manual_credit_pair_found = true;
+ }
+
+ if (manual_credit_pair_found) {
+ auto start_ddr_buffering_action = StartDdrBufferingTaskAction::create();
+ CHECK_EXPECTED_AS_STATUS(start_ddr_buffering_action);
+ configuration_actions.emplace_back(start_ddr_buffering_action.release());
+ }
+
+ return HAILO_SUCCESS;
+}
+
+// Allocates channels and buffers for every edge layer of a single context, in a strict
+// order (all outputs, then DDR inputs, then the remaining inputs), validates the result
+// and finally releases the channel indices that are only needed within this context.
+static hailo_status parse_and_fill_edge_layers_mapping(
+ ContextResources &context_resources,
+ const ContextMetadata &context_metadata,
+ ResourcesManager &resources_manager)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ auto hw_consts = Control::get_hw_consts(resources_manager.get_device());
+ CHECK_EXPECTED_AS_STATUS(hw_consts);
+ const bool should_optimize_credits = hw_consts->should_optimize_credits &&
+ (HAILO_POWER_MODE_PERFORMANCE == resources_manager.get_power_mode());
+
+ // Parse the edge layer by order - first output edge layers, then ddr inputs and only then the input edge layers
+ // In order to ensure that input data can enter the chip only after all other elements are configured.
+ // We parse ddr inputs before boundary/inter-context because otherwise on C2C mode we may lose some credit.
+
+ for (const auto &output_layer_info : context_metadata.get_ddr_output_layers()) {
+ status = fill_ddr_output_layer(context_resources, resources_manager, output_layer_info, *hw_consts);
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &output_layer_info : context_metadata.get_boundary_output_layers()) {
+ status = fill_boundary_output_layer(context_resources, resources_manager, output_layer_info,
+ *hw_consts, should_optimize_credits);
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &output_layer_info : context_metadata.get_inter_context_output_layers()) {
+ status = fill_inter_context_output_layer(context_resources, resources_manager, output_layer_info,
+ *hw_consts, should_optimize_credits);
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &input_layer_info : context_metadata.get_ddr_input_layers()) {
+ status = fill_ddr_input_layer(context_resources, input_layer_info, *hw_consts);
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &input_layer_info : context_metadata.get_boundary_input_layers()) {
+ status = fill_boundary_input_layer(context_resources, resources_manager, input_layer_info,
+ *hw_consts, should_optimize_credits);
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &input_layer_info : context_metadata.get_inter_context_input_layers()) {
+ status = fill_inter_context_input_layer(context_resources, resources_manager, input_layer_info,
+ *hw_consts, should_optimize_credits);
+ CHECK_SUCCESS(status);
+ }
+
+ status = context_resources.validate_edge_layers();
+ CHECK_SUCCESS(status);
+
+ /* UN-Lock resources at the end of the context -
+ h2d inter-context, d2h inter-context and DDR buffer channels */
+ for (const auto &input_layer_info : context_metadata.get_inter_context_input_layers()) {
+ status = resources_manager.free_channel_index(to_layer_identifier(input_layer_info));
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &output_layer_info : context_metadata.get_inter_context_output_layers()) {
+ status = resources_manager.free_channel_index(to_layer_identifier(output_layer_info));
+ CHECK_SUCCESS(status);
+ }
+
+ for (const auto &output_layer_info : context_metadata.get_ddr_output_layers()) {
+ const auto h2d_layer_identifier = std::make_tuple(LayerType::DDR, output_layer_info.name,
+ output_layer_info.connected_context_info.stream_index);
+ status = resources_manager.free_channel_index(h2d_layer_identifier);
+ CHECK_SUCCESS(status);
+
+ const auto d2h_layer_identifier = std::make_tuple(LayerType::DDR, output_layer_info.name,
+ output_layer_info.stream_index);
+ status = resources_manager.free_channel_index(d2h_layer_identifier);
+ CHECK_SUCCESS(status);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+// Returns pairs of form [start, end] (inclusive) describing each maximal run of
+// consecutive actions sharing the same type in the given vector.
+// (Name typo "repreated" is kept - callers elsewhere reference it.)
+static std::vector<std::pair<uint32_t, uint32_t>> get_repreated_actions_boundary_indices(
+ const std::vector<ContextSwitchConfigActionPtr> &actions)
+{
+ const uint32_t num_actions = static_cast<uint32_t>(actions.size());
+
+ std::vector<std::pair<uint32_t, uint32_t>> repeated_indexes;
+ uint32_t run_start = 0;
+ while (run_start < num_actions) {
+ const auto run_type = actions[run_start]->get_type();
+ uint32_t run_end = run_start + 1;
+ while ((run_end < num_actions) && (actions[run_end]->get_type() == run_type)) {
+ run_end++;
+ }
+
+ repeated_indexes.emplace_back(run_start, run_end - 1);
+ run_start = run_end;
+ }
+
+ return repeated_indexes;
+}
+
+// Returns a map from start indexes of repeated actions to the size of the chunk (number of repeated actions)
+// Only runs whose action type supports repeated blocks (and is not denylisted) are
+// considered; runs longer than uint8 max are split into multiple chunks.
+static std::map<uint32_t, uint8_t> get_start_indexes_of_repeated_actions(
+ const std::vector<ContextSwitchConfigActionPtr> &actions,
+ const std::vector<std::pair<uint32_t, uint32_t>> &repeated_indexes,
+ // TODO: get this from HardCoded config (HRT-5352)
+ const std::set<ContextSwitchConfigAction::Type> &action_types_denylist = {})
+{
+ std::map<uint32_t, uint8_t> result;
+ for (const auto &index_pair : repeated_indexes) {
+ if (!actions[index_pair.first]->supports_repeated_block()) {
+ continue;
+ }
+
+ if (contains(action_types_denylist, actions[index_pair.first]->get_type())) {
+ continue;
+ }
+
+ // TODO: Move merge calculation to HRT-5352
+ // Merge calculation (see also - CONTEXT_SWITCH_DEFS__repeated_action_header_t in common/include/context_switch_defs.h):
+ // * Assume there are x repeated actions that can be merged
+ // * Let a := sizeof(action_to_be_merged) [without CONTEXT_SWITCH_DEFS__common_action_header_t]
+ // * sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) is 5
+ // * sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) is 3
+ // Then:
+ // * original_size = x * (5 + a) = 5x + ax
+ // * new_size = 5 + 3 + ax = 8 + ax
+ // * new_size < original_size <=> 8 + ax < 5x + ax <=> 8 < 5x <=> 1.6 < x
+ // Hence we merge for x >= 2
+ static_assert(sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) == 5,
+ "Merge calculation assumes that 'sizeof(CONTEXT_SWITCH_DEFS__common_action_header_t) == 5'");
+ static_assert(sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) == 3,
+ "Merge calculation assumes that 'sizeof(CONTEXT_SWITCH_DEFS__repeated_action_header_t) == 3'");
+ static const uint32_t MIN_REQUIRED_FOR_MERGING = 2;
+
+ uint32_t start_index = index_pair.first;
+ const uint32_t end_index = index_pair.second;
+ while (start_index < end_index) {
+ // The repeated-action header stores the count in a uint8, so cap each chunk at 255.
+ const auto curr_chunk_size = static_cast<uint8_t>(std::min(
+ static_cast<uint32_t>(std::numeric_limits<uint8_t>::max()),
+ end_index - start_index + 1));
+ if (curr_chunk_size < MIN_REQUIRED_FOR_MERGING) {
+ break;
+ }
+
+ result.emplace(start_index, curr_chunk_size);
+
+ start_index += curr_chunk_size;
+ }
+ }
+
+ return result;
+}
+
+// Returns the (start, end) index ranges from repeated_indexes whose first action has the
+// requested type (each range is presumed to hold actions of a single type - see the
+// repeated-boundary computation above).
+static std::set<std::pair<uint32_t, uint32_t>> get_indexes_of_action_type(
+    const std::vector<ContextSwitchConfigActionPtr> &actions,
+    const std::vector<std::pair<uint32_t, uint32_t>> &repeated_indexes,
+    const ContextSwitchConfigAction::Type &required_action_type)
+{
+    std::set<std::pair<uint32_t, uint32_t>> matching_ranges;
+    for (const auto &range : repeated_indexes) {
+        if (actions[range.first]->get_type() == required_action_type) {
+            matching_ranges.insert(range);
+        }
+    }
+
+    return matching_ranges;
+}
+
+// Appends the action that makes the firmware fetch the configuration written so far:
+// an AddCcwBurstAction when pre-fetch is supported, otherwise a
+// FetchCfgChannelDescriptorsAction programmed with the buffer's descriptor count.
+static hailo_status push_fetch_config_actions(
+    ConfigBuffer &config_resources, uint8_t config_stream_index,
+    uint16_t total_ccw_bursts, bool support_pre_fetch,
+    std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
+{
+    if (!support_pre_fetch) {
+        const auto desc_count = config_resources.program_descriptors();
+        CHECK_EXPECTED_AS_STATUS(desc_count);
+
+        auto fetch_action = FetchCfgChannelDescriptorsAction::create(config_resources.channel_id(),
+            desc_count.value());
+        CHECK_EXPECTED_AS_STATUS(fetch_action);
+        processed_configuration_actions.emplace_back(fetch_action.release());
+        return HAILO_SUCCESS;
+    }
+
+    auto burst_action = AddCcwBurstAction::create(config_stream_index, total_ccw_bursts);
+    CHECK_EXPECTED_AS_STATUS(burst_action);
+    processed_configuration_actions.emplace_back(burst_action.release());
+    return HAILO_SUCCESS;
+}
+
+// Writes the ccw data of a single WriteDataCcwAction into the config buffer.
+// When pre-fetch is supported and this is the buffer's last write, the buffer is padded
+// with nops before the write and its descriptors are programmed right after it.
+static hailo_status write_ccw_to_buffer(ConfigBuffer& config_buffer, const WriteDataCcwAction &ccw_action,
+    bool support_pre_fetch)
+{
+    // This is the last write iff the remaining room exactly fits the current data.
+    const bool is_last_write = config_buffer.size_left() == ccw_action.data().size();
+    if (support_pre_fetch && is_last_write) {
+        auto status = config_buffer.pad_with_nops();
+        CHECK_SUCCESS(status);
+    }
+
+    auto status = config_buffer.write(ccw_action.data());
+    CHECK_SUCCESS(status);
+
+    if (support_pre_fetch && is_last_write) {
+        // In pre-fetch mode there is no later FetchCfgChannelDescriptorsAction that would
+        // program the descriptors (see push_fetch_config_actions), so do it here.
+        auto desc_count = config_buffer.program_descriptors();
+        CHECK_EXPECTED_AS_STATUS(desc_count);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Handles a single WriteDataCcwAction: writes its ccw data into the matching config
+// buffer and appends the action that will fetch it (see push_fetch_config_actions).
+// Note: the WriteDataCcwAction itself is consumed here and is not re-added to
+// processed_configuration_actions.
+static hailo_status proccess_write_ccw_action(const ContextSwitchConfigActionPtr &configuration_action,
+    std::vector<ConfigBuffer> &config_resources,
+    const bool support_pre_fetch,
+    std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
+{
+    assert(ContextSwitchConfigAction::Type::WriteDataCcw == configuration_action->get_type());
+    const auto &write_ccw_action = *static_cast<const WriteDataCcwAction*>(configuration_action.get());
+
+    const auto config_stream_index = write_ccw_action.config_stream_index();
+    assert(config_stream_index < config_resources.size());
+    auto status = write_ccw_to_buffer(config_resources[config_stream_index], write_ccw_action, support_pre_fetch);
+    CHECK_SUCCESS(status);
+
+    status = push_fetch_config_actions(config_resources[config_stream_index], config_stream_index,
+        write_ccw_action.total_ccw_burst(), support_pre_fetch, processed_configuration_actions);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Picks the stream index of some edge layer going in the opposite direction, to serve as
+// a dummy mapping target for a layer that is not active in the current context.
+static Expected<uint8_t> find_dummy_stream(const LayerInfo &layer_info, const ContextResources &context_resources)
+{
+    const auto opposite_direction = (HAILO_H2D_STREAM == layer_info.direction) ?
+        HAILO_D2H_STREAM : HAILO_H2D_STREAM;
+    const auto candidates = context_resources.get_edge_layers(opposite_direction);
+    CHECK_AS_EXPECTED(!candidates.empty(), HAILO_INTERNAL_FAILURE, "Couldn't find dummy stream");
+    return Expected<uint8_t>(candidates.front().layer_info.stream_index);
+}
+
+// Adds a ChangeVdmaToStreamMapping action for every boundary stream in the core-op, even
+// for streams that are not active in the current context - those are mapped to a dummy
+// stream of the opposite direction (see find_dummy_stream).
+static hailo_status add_change_vdma_to_stream_mapping(
+    const CoreOpMetadata &core_op_metadata, const ResourcesManager &resources_manager,
+    ContextResources &context_resources, uint8_t context_index,
+    std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
+{
+    for (const auto &layer_info : core_op_metadata.get_all_layer_infos()) {
+        auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
+        CHECK_EXPECTED_AS_STATUS(vdma_channel);
+
+        const auto channel_id = vdma_channel.value()->get_channel_id();
+        // A layer that belongs to a different context can't be mapped to its real stream
+        // here, so its channel is mapped to a dummy stream instead.
+        const bool is_dummy_stream = layer_info.context_index != context_index;
+        uint8_t stream_index = layer_info.stream_index;
+        if (is_dummy_stream) {
+            auto dummy_stream_index = find_dummy_stream(layer_info, context_resources);
+            CHECK_EXPECTED_AS_STATUS(dummy_stream_index);
+            stream_index = *dummy_stream_index;
+        }
+
+        auto action = ChangeVdmaToStreamMapping::create(channel_id, stream_index, is_dummy_stream);
+        CHECK_EXPECTED_AS_STATUS(action);
+        processed_configuration_actions.emplace_back(action.release());
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Appends the channel activation actions for every edge layer of the context.
+static hailo_status push_edge_layer_activation_actions(
+    const ContextResources &context_resources,
+    std::vector<ContextSwitchConfigActionPtr> &actions)
+{
+    // Activate the edge layer by order - first output edge layers, then ddr inputs and only then the input edge layers
+    // In order to ensure that input data can enter the chip only after all other elements are configured.
+    // We parse ddr inputs before boundary/inter-context because otherwise on C2C mode we may lose some credit.
+
+    // 1. DDR output channels (D2H).
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::DDR, HAILO_D2H_STREAM)) {
+        auto activate_action = ActivateDdrOutputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
+            edge_layer.layer_info.ddr_info.min_buffered_rows);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    // 2. Boundary output channels (D2H).
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_D2H_STREAM)) {
+        auto activate_action = ActivateBoundaryOutputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    // 3. Inter-context output channels (D2H).
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::INTER_CONTEXT, HAILO_D2H_STREAM)) {
+        auto activate_action = ActivateInterContextOutputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.network_index,
+            edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    // 4. DDR input channels (H2D) - each one is paired with its connected D2H channel.
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::DDR, HAILO_H2D_STREAM)) {
+        const auto d2h_stream_index = edge_layer.layer_info.connected_context_info.stream_index;
+        auto pair = context_resources.get_ddr_channels_pair(d2h_stream_index);
+        CHECK_EXPECTED_AS_STATUS(pair);
+        const auto d2h_channel_id = pair->get().info().d2h_channel_id;
+
+        auto activate_action = ActivateDdrInputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
+            edge_layer.layer_info.max_shmifo_size, d2h_channel_id);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    // 5. Boundary input channels (H2D).
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
+        auto activate_action = ActivateBoundaryInputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
+            edge_layer.layer_info.max_shmifo_size);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    // 6. Inter-context input channels (H2D) - last, so input data can only start flowing
+    //    once everything else is configured.
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::INTER_CONTEXT, HAILO_H2D_STREAM)) {
+        auto activate_action = ActivateInterContextInputChannelAction::create(edge_layer.channel_id,
+            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
+            edge_layer.layer_info.max_shmifo_size);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Handles a single TriggerNewDataFromDataInput action. Before the first action of the
+// group, the edge-layer activation actions (plus vdma->stream mappings on multi-context
+// and the ddr buffer infos) are pushed; after the last action of the group, a
+// StartBurstCreditsTaskAction is pushed.
+static hailo_status proccess_trigger_new_data_input_action(const ContextSwitchConfigActionPtr &configuration_action,
+    uint32_t trigger_new_data_from_input_group_start,
+    uint32_t trigger_new_data_from_input_group_end,
+    const uint32_t &action_index,
+    const CoreOpMetadata &core_op_metadata,
+    const ResourcesManager &resources_manager,
+    ContextResources &context_resources,
+    uint8_t context_index,
+    std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions, bool is_single_context)
+{
+    if (trigger_new_data_from_input_group_start == action_index) {
+        auto status = push_edge_layer_activation_actions(context_resources, processed_configuration_actions);
+        CHECK_SUCCESS(status);
+
+        // Single-context core-ops don't need vdma->stream re-mapping between contexts.
+        if (!is_single_context) {
+            status = add_change_vdma_to_stream_mapping(core_op_metadata, resources_manager,
+                context_resources, context_index, processed_configuration_actions);
+            CHECK_SUCCESS(status);
+        }
+
+        // DDR buffer info actions need to happen after the edge layer activation actions.
+        status = add_ddr_buffers_info(processed_configuration_actions, context_resources);
+        CHECK_SUCCESS(status);
+    }
+
+    // Add the current action
+    processed_configuration_actions.emplace_back(configuration_action);
+
+    // At the end of a consecutive group of TriggerNewDataFromDataInput actions, we can trigger the BurstCreditsTask
+    // in the FW, via StartBurstCreditsTaskAction.
+    if (trigger_new_data_from_input_group_end == action_index) {
+        auto start_burst_credits_task_action = StartBurstCreditsTaskAction::create();
+        CHECK_EXPECTED_AS_STATUS(start_burst_credits_task_action);
+        processed_configuration_actions.emplace_back(start_burst_credits_task_action.release());
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// At the end of each consecutive group of WriteDataCcwAction, a fetch action is added:
+// a FetchCfgChannelDescriptorsAction, or an AddCcwBurstAction when pre-fetch is supported.
+static hailo_status add_fetch_config_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
+    std::vector<ConfigBuffer> &config_resources, bool support_pre_fetch)
+{
+
+    std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
+    for (uint32_t action_index = 0; action_index < configuration_actions.size(); action_index++) {
+        const auto &configuration_action = configuration_actions[action_index];
+        if (ContextSwitchConfigAction::Type::WriteDataCcw == configuration_action->get_type()) {
+            // Writes the ccw data into the config buffer and pushes the matching fetch
+            // action; the WriteDataCcw action itself is dropped from the processed list.
+            auto status = proccess_write_ccw_action(configuration_action, config_resources,
+                support_pre_fetch, processed_configuration_actions);
+            CHECK_SUCCESS(status);
+        } else {
+            // Add the current action
+            processed_configuration_actions.emplace_back(configuration_action);
+        }
+    }
+
+    // Replace the original configuration actions with the processed ones.
+    configuration_actions = processed_configuration_actions;
+
+    return HAILO_SUCCESS;
+}
+
+// Push activate config channels in the beginning of the context, and deactivation on end of context.
+static hailo_status add_config_channel_activation_actions(std::vector<ContextSwitchConfigActionPtr> &actions,
+    const std::vector<ConfigBuffer> &config_resources)
+{
+    std::vector<ContextSwitchConfigActionPtr> processed_actions;
+    // One activation + one deactivation per config channel.
+    const size_t new_actions_count = 2 * config_resources.size();
+    processed_actions.reserve(actions.size() + new_actions_count);
+
+    // NOTE(review): config_stream_index is uint8_t - assumes config_resources.size()
+    // never exceeds 255; verify against the config-channel limits.
+    for (uint8_t config_stream_index = 0; config_stream_index < config_resources.size(); config_stream_index++) {
+        const auto &config_buffer = config_resources[config_stream_index];
+        auto activate_action = ActivateConfigChannelAction::create(config_stream_index, config_buffer.channel_id(),
+            config_buffer.get_host_buffer_info());
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        processed_actions.push_back(activate_action.release());
+    }
+
+    // The original actions run between the activations and the deactivations.
+    processed_actions.insert(processed_actions.end(), actions.begin(), actions.end());
+
+    for (uint8_t config_stream_index = 0; config_stream_index < config_resources.size(); config_stream_index++) {
+        const auto &config_buffer = config_resources[config_stream_index];
+        auto deactivate_action = DeactivateConfigChannelAction::create(config_stream_index, config_buffer.channel_id());
+        CHECK_EXPECTED_AS_STATUS(deactivate_action);
+        processed_actions.push_back(deactivate_action.release());
+    }
+
+    actions = processed_actions;
+    return HAILO_SUCCESS;
+}
+
+// For any context with edge layers (the preliminary context when in preliminary_run_asap mode or dynamic contexts),
+// we need to add the following:
+// * Activate*Channel actions (activation order is documented in push_edge_layer_activation_actions)
+// * ChangeVdmaToStreamMapping for each boundary stream in the network group (even for boundaries not activated in the
+// current context).
+// * DdrPairInfoActions for each ddr, followed by StartDdrBufferingTaskAction.
+// * TriggerNewDataFromDataInput for each input layer (inter context/ boundary) in the context. These actions
+//   are taken from the HEF.
+// * Finally StartBurstCreditsTaskAction
+static hailo_status handle_edge_layer_activation_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions,
+    const CoreOpMetadata &core_op_metadata,
+    const ResourcesManager &resources_manager, ContextResources &context_resources, uint8_t context_index,
+    bool is_single_context)
+{
+    // Locate the single consecutive group of TriggerNewDataFromDataInput actions.
+    const auto repeated_indexes = get_repreated_actions_boundary_indices(configuration_actions);
+    const auto trigger_new_data_from_input_group_indexes = get_indexes_of_action_type(
+        configuration_actions, repeated_indexes, ContextSwitchConfigAction::Type::TriggerNewDataFromDataInput);
+    CHECK(trigger_new_data_from_input_group_indexes.size() == 1, HAILO_INTERNAL_FAILURE,
+        "Expected only one group of TriggerNewDataFromDataInput actions");
+    const auto trigger_new_data_from_input_group_start = trigger_new_data_from_input_group_indexes.cbegin()->first;
+    const auto trigger_new_data_from_input_group_end = trigger_new_data_from_input_group_indexes.cbegin()->second;
+
+    std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
+    for (uint32_t action_index = 0; action_index < configuration_actions.size(); action_index++) {
+        const auto &configuration_action = configuration_actions[action_index];
+        if (ContextSwitchConfigAction::Type::TriggerNewDataFromDataInput == configuration_action->get_type()) {
+            // Surrounds the trigger group with the edge-layer activation actions (before)
+            // and the StartBurstCreditsTaskAction (after).
+            auto status = proccess_trigger_new_data_input_action(configuration_action,
+                trigger_new_data_from_input_group_start, trigger_new_data_from_input_group_end, action_index,
+                core_op_metadata, resources_manager, context_resources, context_index, processed_configuration_actions, is_single_context);
+            CHECK_SUCCESS(status);
+        } else {
+            // Add the current action
+            processed_configuration_actions.emplace_back(configuration_action);
+        }
+    }
+
+    // Replace the original configuration actions with the processed ones.
+    configuration_actions = processed_configuration_actions;
+
+    return HAILO_SUCCESS;
+}
+
+// If groups of consecutive actions can be "merged" as repeated actions (saving room in the FW's
+// action list), a RepeatedAction is placed before the relevant actions.
+// See also: CONTEXT_SWITCH_DEFS__repeated_action_header_t's documentation in context_switch_defs.h.
+static hailo_status handle_repeated_actions(std::vector<ContextSwitchConfigActionPtr> &configuration_actions)
+{
+    const auto repeated_indexes = get_repreated_actions_boundary_indices(configuration_actions);
+    // Maps start index -> number of actions to merge under one RepeatedAction header.
+    const auto start_indexes_of_repeated_actions = get_start_indexes_of_repeated_actions(
+        configuration_actions, repeated_indexes);
+
+    std::vector<ContextSwitchConfigActionPtr> processed_configuration_actions;
+    // Each merged group adds exactly one RepeatedAction header on top of its actions.
+    processed_configuration_actions.reserve(configuration_actions.size() + start_indexes_of_repeated_actions.size());
+
+    uint32_t action_index = 0;
+    while (action_index < configuration_actions.size()){
+        if (contains(start_indexes_of_repeated_actions, action_index)) {
+            // A group of actions can be "merged" as repeated actions.
+            // Add a RepeatedAction
+            const auto num_repeated = start_indexes_of_repeated_actions.at(action_index);
+
+            std::vector<ContextSwitchConfigActionPtr> repeated_block;
+            repeated_block.reserve(num_repeated);
+            for (uint32_t repeated_offset = 0; repeated_offset < num_repeated; repeated_offset++) {
+                repeated_block.emplace_back(configuration_actions[action_index]);
+                action_index++;
+            }
+
+            auto repeated_header_action = RepeatedAction::create(std::move(repeated_block));
+            CHECK_EXPECTED_AS_STATUS(repeated_header_action);
+            processed_configuration_actions.emplace_back(repeated_header_action.value());
+        }
+        else {
+            processed_configuration_actions.emplace_back(configuration_actions[action_index]);
+            action_index++;
+        }
+    }
+
+    // Replace the original configuration actions with the processed ones.
+    configuration_actions = processed_configuration_actions;
+
+    return HAILO_SUCCESS;
+}
+
+// Returns true when the HEF was compiled for a Hailo-15 family device.
+// HW_ARCH__LAVENDER and HW_ARCH__GINGER are matched as well, to keep supporting HEFs
+// that were compiled for those legacy arch names.
+static bool is_hailo15_device_type(const ProtoHEFHwArch &hw_arch)
+{
+    switch (hw_arch) {
+    case PROTO__HW_ARCH__GINGER:
+    case PROTO__HW_ARCH__LAVENDER:
+    case PROTO__HW_ARCH__HAILO15H:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// Serializes every action and appends the resulting buffers to the context's
+// action-list builder.
+static hailo_status write_action_list(const ContextResources & context_resources, ContextSwitchBufferBuilder &builder,
+    const std::vector<ContextSwitchConfigActionPtr> &actions)
+{
+    for (const auto &action : actions) {
+        // A single action may serialize into multiple buffers.
+        auto serialized_buffers = action->serialize(context_resources);
+        CHECK_EXPECTED_AS_STATUS(serialized_buffers);
+
+        for (auto &serialized_buffer : serialized_buffers.value()) {
+            builder.write_action(MemoryView(serialized_buffer));
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// At the end of a dynamic context, boundary channels are validated while every other
+// edge-layer channel is deactivated.
+static hailo_status add_edge_layer_end_of_context_actions(const ContextResources &context_resources,
+    std::vector<ContextSwitchConfigActionPtr> &actions)
+{
+    for (const auto &edge_layer : context_resources.get_edge_layers()) {
+        if (LayerType::BOUNDARY == edge_layer.layer_info.type) {
+            auto validate_action = ValidateChannelAction::create(edge_layer);
+            CHECK_EXPECTED_AS_STATUS(validate_action);
+            actions.emplace_back(validate_action.release());
+        } else {
+            auto deactivate_action = DeactivateChannelAction::create(edge_layer);
+            CHECK_EXPECTED_AS_STATUS(deactivate_action);
+            actions.emplace_back(deactivate_action.release());
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Builds the action list of a single dynamic context and writes it into the context's
+// action-list buffer: maps edge layers, expands the HEF actions (config fetches,
+// edge-layer activations, config-channel activation), adds the end-of-context actions
+// and finally merges repeated actions.
+static hailo_status fill_context_recipes_for_multi_context(const ProtoHEFHwArch &hw_arch,
+    ContextResources &context_resources, ResourcesManager &resources_manager,
+    uint8_t context_index, const CoreOpMetadata &core_op_metadata, const ContextMetadata &context_metadata,
+    bool is_single_context)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    // Add edge layers mapping
+    status = parse_and_fill_edge_layers_mapping(context_resources, context_metadata, resources_manager);
+    CHECK_SUCCESS(status);
+
+    // Parse context
+    std::vector<ContextSwitchConfigActionPtr> actions = context_metadata.get_actions();
+
+    const auto support_pre_fetch = is_hailo15_device_type(hw_arch);
+    status = add_fetch_config_actions(actions, context_resources.get_config_buffers(), support_pre_fetch);
+    CHECK_SUCCESS(status);
+
+    status = handle_edge_layer_activation_actions(actions, core_op_metadata, resources_manager,
+        context_resources, context_index, is_single_context);
+    CHECK_SUCCESS(status);
+
+    status = add_config_channel_activation_actions(actions, context_resources.get_config_buffers());
+    CHECK_SUCCESS(status);
+
+    if (is_single_context) {
+        // Single context network must wait for network group change event after they finish the dynamic context.
+        auto wait_action = WaitForNetworkGroupChangeAction::create();
+        CHECK_EXPECTED_AS_STATUS(wait_action);
+        actions.emplace_back(wait_action.release());
+    }
+    else {
+        status = add_edge_layer_end_of_context_actions(context_resources, actions);
+        // Bugfix: this status was previously overwritten by the next call without being checked.
+        CHECK_SUCCESS(status);
+    }
+
+    status = handle_repeated_actions(actions);
+    CHECK_SUCCESS(status);
+
+    return write_action_list(context_resources, context_resources.builder(), actions);
+}
+
+// Creates a boundary vdma channel for every layer of the core-op.
+static hailo_status create_boundary_channels(ResourcesManager &resources_manager,
+    CoreOpMetadata &core_op_metadata)
+{
+    for (const auto &layer_info : core_op_metadata.get_all_layer_infos()) {
+        const auto create_status = resources_manager.create_boundary_vdma_channel(layer_info);
+        CHECK_SUCCESS(create_status);
+    }
+    return HAILO_SUCCESS;
+}
+
+// Builds the action list of the activation context: fills the boundary input/output
+// layers and pushes an "open boundary channel" action for each of them.
+static hailo_status fill_activation_config_recepies_for_multi_context(
+    ContextResources &context_resources, ResourcesManager &resources_manager,
+    std::shared_ptr<CoreOpMetadata> core_op_metadata)
+{
+    auto hw_consts = Control::get_hw_consts(resources_manager.get_device());
+    CHECK_EXPECTED_AS_STATUS(hw_consts);
+    // Credit optimization is only applied when the device supports it and the power mode
+    // is performance.
+    const bool should_optimize_credits = hw_consts->should_optimize_credits &&
+        (HAILO_POWER_MODE_PERFORMANCE == resources_manager.get_power_mode());
+
+    for (const auto &layer_info : core_op_metadata->get_output_layer_infos()){
+        auto status = fill_boundary_output_layer(context_resources, resources_manager, layer_info, *hw_consts,
+            should_optimize_credits);
+        CHECK_SUCCESS(status);
+    }
+
+    for (const auto &layer_info : core_op_metadata->get_input_layer_infos()) {
+        auto status = fill_boundary_input_layer(context_resources, resources_manager, layer_info, *hw_consts,
+            should_optimize_credits);
+        CHECK_SUCCESS(status);
+    }
+
+    auto status = context_resources.validate_edge_layers();
+    CHECK_SUCCESS(status);
+
+    std::vector<ContextSwitchConfigActionPtr> actions;
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY)) {
+        auto action = edge_layer.layer_info.direction == HAILO_H2D_STREAM ?
+            OpenBoundaryInputChannelAction::create(edge_layer.channel_id, edge_layer.buffer_info) :
+            OpenBoundaryOutputChannelAction::create(edge_layer.channel_id, edge_layer.buffer_info);
+        CHECK_EXPECTED_AS_STATUS(action);
+        actions.emplace_back(action.release());
+    }
+
+    return write_action_list(context_resources, context_resources.builder(), actions);
+}
+
+// Builds the action list of the batch-switching context: resets the ddr buffering task
+// and re-runs the batch-dependent "enable lcu" actions of the preliminary context.
+static hailo_status fill_batch_switching_context_config_recepies_for_multi_context(
+    ContextResources &context_resources, const CoreOpMetadata &core_op_metadata)
+{
+    std::vector<ContextSwitchConfigActionPtr> actions;
+
+    // We need to reset the ddr buffering task when we change the batch_size (since it depends on the batch_size param)
+    auto reset_ddr_action = ResetDdrBufferingTaskAction::create();
+    CHECK_EXPECTED_AS_STATUS(reset_ddr_action);
+    actions.emplace_back(reset_ddr_action.release());
+
+    // We need to re-enable all the lcus of the first context since some of their config regs are batch dependent.
+    // => We'll filter out all of the "enable lcu" actions from the preliminary context
+    static const std::set<ContextSwitchConfigAction::Type> BATCH_SWITCHING_ACTIONS = {
+        ContextSwitchConfigAction::Type::EnableLcuDefault,
+        ContextSwitchConfigAction::Type::EnableLcuNonDefault
+    };
+    const auto batch_switch_actions = core_op_metadata.preliminary_context().get_actions_of_type(BATCH_SWITCHING_ACTIONS);
+    actions.insert(actions.end(), batch_switch_actions.begin(), batch_switch_actions.end());
+
+    auto status = handle_repeated_actions(actions);
+    CHECK_SUCCESS(status);
+
+    return write_action_list(context_resources, context_resources.builder(), actions);
+}
+
+// Builds the action list of the preliminary context. In preliminary_run_asap mode the
+// preliminary context also carries the first dynamic context's edge layers, so the same
+// edge-layer handling as a dynamic context is applied.
+static hailo_status fill_preliminary_config_recepies_for_multi_context(const ProtoHEFHwArch &hw_arch,
+    ContextResources &context_resources, ResourcesManager &resources_manager,
+    std::shared_ptr<CoreOpMetadata> core_op_metadata, const ContextMetadata &preliminary_context,
+    bool is_single_context)
+{
+    static const auto PRELIMINARY_CONTEXT_INDEX = 0; // First context in the hef
+
+    if (resources_manager.get_supported_features().preliminary_run_asap) {
+        // Add edge layers mapping (only preliminary_run_asap networks have edge layers in the preliminary context)
+        assert(PRELIMINARY_CONTEXT_INDEX < core_op_metadata->dynamic_contexts().size());
+        auto status = parse_and_fill_edge_layers_mapping(context_resources,
+            core_op_metadata->dynamic_contexts()[PRELIMINARY_CONTEXT_INDEX], resources_manager);
+        CHECK_SUCCESS(status);
+    }
+
+    // Parse preliminary config
+    std::vector<ContextSwitchConfigActionPtr> actions = preliminary_context.get_actions();
+
+    const auto support_pre_fetch = is_hailo15_device_type(hw_arch);
+    auto status = add_fetch_config_actions(actions, context_resources.get_config_buffers(), support_pre_fetch);
+    CHECK_SUCCESS(status);
+
+    if (resources_manager.get_supported_features().preliminary_run_asap) {
+        status = handle_edge_layer_activation_actions(actions, *core_op_metadata, resources_manager,
+            context_resources, PRELIMINARY_CONTEXT_INDEX, is_single_context);
+        CHECK_SUCCESS(status);
+    }
+
+    status = add_config_channel_activation_actions(actions, context_resources.get_config_buffers());
+    CHECK_SUCCESS(status);
+
+    status = handle_repeated_actions(actions);
+    CHECK_SUCCESS(status);
+
+    return write_action_list(context_resources, context_resources.builder(), actions);
+}
+
+
+
+// Builds a fully-configured ResourcesManager for the given core-op: validates context
+// and batch-size limits, creates the boundary channels, fills the action lists of the
+// activation, batch-switching, preliminary and dynamic contexts (in that order), and
+// finally configures the manager.
+Expected<std::shared_ptr<ResourcesManager>> ResourcesManagerBuilder::build(uint8_t current_core_op_index, VdmaDevice &device,
+    HailoRTDriver &driver, const ConfigureNetworkParams &config_params,
+    std::shared_ptr<CoreOpMetadata> core_op_metadata, const ProtoHEFHwArch &hw_arch)
+{
+    // Dynamic contexts plus the fixed non-dynamic ones (activation/batch-switching/preliminary).
+    const auto num_contexts = core_op_metadata->dynamic_contexts().size() +
+        CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS;
+    CHECK_AS_EXPECTED(CONTROL_PROTOCOL__MAX_CONTEXTS_PER_NETWORK_GROUP >= num_contexts, HAILO_INVALID_HEF,
+        "App '{}' contains more contexts than allowed ({} > {})",
+        core_op_metadata->core_op_name(), num_contexts, CONTROL_PROTOCOL__MAX_CONTEXTS_PER_NETWORK_GROUP);
+
+    for (auto &network_params : config_params.network_params_by_name) {
+        CHECK(HAILO_MAX_BATCH_SIZE >= network_params.second.batch_size, make_unexpected(HAILO_INVALID_ARGUMENT),
+            "Given batch size ({}) for network group {}, network {} is bigger than max allowed ({})", network_params.second.batch_size,
+            core_op_metadata->core_op_name(), network_params.first, HAILO_MAX_BATCH_SIZE);
+    }
+
+    auto resources_manager = ResourcesManager::create(device, driver, config_params, core_op_metadata,
+        current_core_op_index);
+    CHECK_EXPECTED(resources_manager);
+
+    // TODO: Use a new flag in config_params.stream_params_by_name to mark channels as async channels.
+    // will also used to mark streams as async in ConfiguredNetworkGroupBase::create_in/output_stream_from_config_params
+    // (HRT-9104)
+    auto status = create_boundary_channels(resources_manager.value(), *core_op_metadata);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Activation context - opens the boundary channels.
+    auto activation_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION);
+    CHECK_EXPECTED(activation_context);
+    status = fill_activation_config_recepies_for_multi_context(activation_context.value().get(),
+        resources_manager.value(), core_op_metadata);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Batch-switching context - re-applies the batch-dependent actions.
+    auto batch_switching_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING);
+    CHECK_EXPECTED(batch_switching_context);
+    status = fill_batch_switching_context_config_recepies_for_multi_context(batch_switching_context.value().get(),
+        *core_op_metadata);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    const bool is_single_context = core_op_metadata->dynamic_contexts().size() == 1;
+
+    // Preliminary context.
+    auto preliminary_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY,
+        core_op_metadata->preliminary_context().config_buffers_info());
+    CHECK_EXPECTED(preliminary_context);
+    status = fill_preliminary_config_recepies_for_multi_context(hw_arch, preliminary_context.value().get(),
+        resources_manager.value(), core_op_metadata, core_op_metadata->preliminary_context(), is_single_context);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Dynamic contexts, in HEF order.
+    uint8_t context_index = 0;
+    for (const auto &context_metadata : core_op_metadata->dynamic_contexts()) {
+        auto new_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC,
+            context_metadata.config_buffers_info());
+        CHECK_EXPECTED(new_context);
+
+        status = fill_context_recipes_for_multi_context(hw_arch, new_context.value().get(), resources_manager.value(),
+            context_index, *core_op_metadata,
+            context_metadata, is_single_context);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        context_index++;
+    }
+
+    status = resources_manager->configure();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto resources_manager_ptr = make_shared_nothrow<ResourcesManager>(resources_manager.release());
+    CHECK_NOT_NULL_AS_EXPECTED(resources_manager_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return resources_manager_ptr;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file resource_manager_builder.hpp
+ * @brief Builds a ResourcesManager object for the given CoreOp.
+ **/
+
+#ifndef _HAILO_RESOURCE_MANAGER_BUILDER_HPP_
+#define _HAILO_RESOURCE_MANAGER_BUILDER_HPP_
+
+#include "hef/hef_internal.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+
+namespace hailort
+{
+
+// Static-only factory that builds a fully-configured ResourcesManager for a single core-op.
+class ResourcesManagerBuilder final {
+public:
+    ResourcesManagerBuilder() = delete;
+
+    /* TODO HRT-5067 - work with hailo_device_architecture_t instead of ProtoHEFHwArch */
+    // Allocates the contexts and boundary channels, fills every context's action list and
+    // configures the resulting manager.
+    static Expected<std::shared_ptr<ResourcesManager>> build(uint8_t net_group_index, VdmaDevice &device,
+        HailoRTDriver &driver, const ConfigureNetworkParams &config_params,
+        std::shared_ptr<CoreOpMetadata> core_op, const ProtoHEFHwArch &hw_arch);
+
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_RESOURCE_MANAGER_BUILDER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file core_stream.cpp
- **/
-
-#include "core_stream.hpp"
-#include "control.hpp"
-
-namespace hailort
-{
-
-Expected<std::unique_ptr<CoreInputStream>> CoreInputStream::create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer,
- uint16_t batch_size, EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- CHECK_AS_EXPECTED(device.get_type() == Device::Type::CORE, HAILO_INTERNAL_FAILURE,
- "Invalid device type");
-
- CoreDevice *core_device = reinterpret_cast<CoreDevice*>(&device);
- std::unique_ptr<CoreInputStream> local_stream(new (std::nothrow) CoreInputStream(*core_device,
- std::move(channel), edge_layer, std::move(network_group_activated_event), batch_size,
- DEFAULT_TRANSFER_TIMEOUT, status));
- CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_stream;
-}
-
-CoreInputStream::CoreInputStream(
- CoreDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status) :
- VdmaInputStream(device, std::move(channel), edge_layer, network_group_activated_event,
- batch_size, transfer_timeout, HAILO_STREAM_INTERFACE_CORE, status)
-{}
-
-Expected<std::unique_ptr<CoreOutputStream>> CoreOutputStream::create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer,
- uint16_t batch_size, EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- CHECK_AS_EXPECTED(device.get_type() == Device::Type::CORE, HAILO_INTERNAL_FAILURE,
- "Invalid device type");
-
- CoreDevice *core_device = reinterpret_cast<CoreDevice*>(&device);
- std::unique_ptr<CoreOutputStream> local_stream(new (std::nothrow) CoreOutputStream(*core_device,
- std::move(channel), edge_layer, std::move(network_group_activated_event),
- batch_size, DEFAULT_TRANSFER_TIMEOUT, status));
- CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_stream;
-}
-
-CoreOutputStream::CoreOutputStream(
- CoreDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status) :
- VdmaOutputStream(device, std::move(channel), edge_layer,
- network_group_activated_event, batch_size, transfer_timeout, status)
-{}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file core_stream.hpp
- * @brief Stream object for Core device
- **/
-
-#ifndef _HAILO_CORE_STREAM_HPP_
-#define _HAILO_CORE_STREAM_HPP_
-
-#include "vdma_stream.hpp"
-#include "core_device.hpp"
-
-
-namespace hailort
-{
-
-class CoreInputStream : public VdmaInputStream {
-public:
- CoreInputStream(CoreInputStream &&other) = default;
- virtual ~CoreInputStream() = default;
-
- static Expected<std::unique_ptr<CoreInputStream>> create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_CORE; }
-
-private:
- CoreInputStream(
- CoreDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status);
-};
-
-class CoreOutputStream : public VdmaOutputStream {
-public:
- CoreOutputStream(CoreOutputStream &&other) = default;
- virtual ~CoreOutputStream() = default;
-
- static Expected<std::unique_ptr<CoreOutputStream>> create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_CORE; }
-
-private:
- explicit CoreOutputStream(
- CoreDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status);
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CORE_STREAM_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file d2h_event_queue.hpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#ifndef HAILO_D2H_EVENT_QUEUE_HPP_
-#define HAILO_D2H_EVENT_QUEUE_HPP_
-
-#include "d2h_events.h"
-#include "thread_safe_queue.hpp"
-
-namespace hailort
-{
-
-class D2hEventQueue : public SafeQueue<D2H_EVENT_MESSAGE_t> {
-public:
- void clear() {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_queue = std::queue<D2H_EVENT_MESSAGE_t>();
- }
-};
-
-} /* namespace hailort */
-
-#endif // HAILO_D2H_EVENT_QUEUE_HPP_
+++ /dev/null
-/*
- * =============================================================================
- *
- * HAILO
- *
- * Property of HAILO Tech
- * For Unrestricted Internal Use Only
- * Unauthorized reproduction and/or distribution is strictly prohibited.
- * This product is protected under copyright law and trade secret law
- * Created 2018, (C) Copyright 2018 Hailo Tech . All rights reserved.
- * as an unpublished work.
- */
-/**
-* Filename: d2h_events_parser.c
-*
-* Description: Implements parsing device to host notifications.
-*
-*=============================================================================*/
-
-#include <stdint.h>
-#include <string.h>
-#include "common/utils.hpp"
-#include "d2h_events.h"
-#include "byte_order.h"
-#include "common/logger_macros.hpp"
-
-using namespace hailort;
-
-/* Function prototype for control operations */
-typedef HAILO_COMMON_STATUS_t (*firmware_notifications_parser_t) (D2H_EVENT_MESSAGE_t *d2h_notification_message);
-
-/**********************************************************************
- * Private Declarations
- **********************************************************************/
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_rx_error(D2H_EVENT_MESSAGE_t *d2h_notification_message) ;
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_host_info_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_temperature_alarm_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_closed_streams_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_clock_changed_event_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
-
-/**********************************************************************
- * Globals
- **********************************************************************/
-firmware_notifications_parser_t g_firmware_notifications_parser[D2H_EVENT_ID_COUNT] = {
- D2H_EVENTS__parse_rx_error,
- D2H_EVENTS__parse_host_info_notification,
- D2H_EVENTS__parse_health_monitor_temperature_alarm_notification,
- D2H_EVENTS__parse_health_monitor_closed_streams_notification,
- D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification,
- D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification,
- D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification,
- D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification,
- D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification,
- D2H_EVENTS__parse_context_switch_breakpoint_reached,
- D2H_EVENTS__parse_health_monitor_clock_changed_event_notification
-};
-/**********************************************************************
- * Internal Functions
- **********************************************************************/
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_rx_error(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_RX_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.rx_error_event)) {
- LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__INFO("Got Rx Error {} Event From module_id {} with error {}, queue {}",((D2H_EVENT_PRIORITY_CRITICAL == d2h_notification_message->header.priority) ?"Critical":"Info"),
- d2h_notification_message->header.module_id, d2h_notification_message->message_parameters.rx_error_event.error, d2h_notification_message->message_parameters.rx_error_event.queue_number);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_host_info_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HOST_INFO_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.host_info_event)) {
- LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__INFO("Got host config {} Event From module_id {} with connection type {}",((D2H_EVENT_PRIORITY_CRITICAL == d2h_notification_message->header.priority) ?"Critical":"Info"),
- d2h_notification_message->header.module_id, ((D2H_EVENT_COMMUNICATION_TYPE_UDP == d2h_notification_message->message_parameters.host_info_event.connection_type) ?"UDP":"PCIe"));
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_temperature_alarm_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_TEMPERATURE_ALARM_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- switch (d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.temperature_zone) {
- case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__GREEN:
- LOGGER__INFO("Got health monitor notification - temperature reached green zone. sensor id={}, TS00={}c, TS01={}c",
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
- break;
-
- case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__ORANGE:
- LOGGER__WARNING("Got health monitor notification - temperature reached orange zone. sensor id={}, TS00={}c, TS01={}c",
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
- break;
-
- case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__RED:
- LOGGER__CRITICAL("Got health monitor notification - temperature reached red zone. sensor id={}, TS00={}c, TS01={}c",
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
- d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
- break;
-
- default:
- LOGGER__ERROR("Got invalid health monitor notification - temperature zone could not be parsed.");
- status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
- goto l_exit;
- }
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_clock_changed_event_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_CLOCK_CHANGED_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
- LOGGER__WARNING("Got health monitor notification - System's clock has been changed from {} to {}",
- d2h_notification_message->message_parameters.health_monitor_clock_changed_event.previous_clock,
- d2h_notification_message->message_parameters.health_monitor_clock_changed_event.current_clock);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_closed_streams_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_CLOSED_STREAMS_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.health_monitor_closed_streams_event)) {
- LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__CRITICAL("Got health monitor closed streams notification. temperature: TS00={} c, TS01={} c, inputs bitfield:{:x}, outputs bitfield:{:x}",
- d2h_notification_message->message_parameters.health_monitor_closed_streams_event.ts0_temperature,
- d2h_notification_message->message_parameters.health_monitor_closed_streams_event.ts1_temperature,
- d2h_notification_message->message_parameters.health_monitor_closed_streams_event.closed_input_streams,
- d2h_notification_message->message_parameters.health_monitor_closed_streams_event.closed_output_streams);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_OVERCURRENT_ALERT_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event)) {
- LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- if (d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.is_last_overcurrent_violation_reached) {
- LOGGER__WARNING("Got health monitor notification - last overcurrent violation allow alert state. The exceeded alert threshold is {} mA",
- d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
- } else {
- switch (d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.overcurrent_zone) {
- case HAILO_OVERCURRENT_PROTECTION_OVERCURRENT_ZONE__GREEN:
- LOGGER__INFO("Got health monitor notification - overcurrent reached green zone. clk frequency decrease process was stopped. The exceeded alert threshold is {} mA",
- d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
- break;
- case HAILO_OVERCURRENT_PROTECTION_OVERCURRENT_ZONE__RED:
- LOGGER__CRITICAL("Got health monitor notification - overcurrent reached red zone. clk frequency decrease process was started. The exceeded alert threshold is {} mA",
- d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
- break;
- default:
- LOGGER__ERROR("Got invalid health monitor notification - overcurrent alert state could not be parsed.");
- status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
- goto l_exit;
- }
- }
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification(
- D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_LCU_ECC_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h event lcu ecc uncorrectable error invalid parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(sizeof(d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event) != d2h_notification_message->header.payload_length) {
- LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__WARNING("Got health monitor LCU ECC correctable error event. cluster_bitmap={}",
- d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event.cluster_bitmap);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification(
- D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_LCU_ECC_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h event invalid lcu ecc uncorrectable error parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(sizeof(d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event) != d2h_notification_message->header.payload_length) {
- LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__CRITICAL("Got health monitor LCU ECC uncorrectable error event. cluster_bitmap={}",
- d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event.cluster_bitmap);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification(
- D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- CHECK(D2H_EVENT_HEALTH_MONITOR_CPU_ECC_EVENT_PARAMETER_COUNT == d2h_notification_message->header.parameter_count,
- HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT,
- "d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
-
- CHECK(sizeof(d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event) == d2h_notification_message->header.payload_length,
- HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH,
- "d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
-
- LOGGER__ERROR("Got health monitor CPU ECC error event. memory_bitmap={}",
- d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event.memory_bitmap);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification(
- D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_HEALTH_MONITOR_CPU_ECC_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
- LOGGER__ERROR("d2h event invalid cpu ecc uncorrectable error parameter count: {}", d2h_notification_message->header.parameter_count);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
- goto l_exit;
- }
-
- if(sizeof(d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event) != d2h_notification_message->header.payload_length) {
- LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
- status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
- goto l_exit;
- }
-
- LOGGER__CRITICAL("Got health monitor CPU ECC fatal event. memory_bitmap={}",
- d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event.memory_bitmap);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
-l_exit:
- return status;
-}
-
-static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached(D2H_EVENT_MESSAGE_t *d2h_notification_message)
-{
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- CHECK(D2H_EVENT_CONTEXT_SWITCH_BREAKPOINT_REACHED_EVENT_PARAMETER_COUNT == d2h_notification_message->header.parameter_count,
- HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT,
- "d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
-
- CHECK(d2h_notification_message->header.payload_length ==
- sizeof(d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event),
- HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH,
- "d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
-
- LOGGER__INFO("Got Context switch breakpoint with net_group index {}, batch index {}, context index {}, action index {}",
- d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.application_index,
- d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.batch_index,
- d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.context_index,
- d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.action_index);
-
- status = HAILO_COMMON_STATUS__SUCCESS;
-
- return status;
-}
-
-/**********************************************************************
- * Public Functions
- **********************************************************************/
-HAILO_COMMON_STATUS_t D2H_EVENTS__parse_event(D2H_EVENT_MESSAGE_t *d2h_notification_message){
-
- HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
-
- if (D2H_EVENT_ID_COUNT < d2h_notification_message->header.event_id){
- LOGGER__ERROR("d2h notification invalid notification_id: {}", d2h_notification_message->header.event_id);
- status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
- goto l_exit;
- }
- status = g_firmware_notifications_parser[d2h_notification_message->header.event_id](d2h_notification_message);
-
-l_exit:
- return status;
-}
+++ /dev/null
-/**\r
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
-**/\r
-/**\r
- * @file ddr_channels_pair.cpp\r
- **/\r
-\r
-#include "ddr_channels_pair.hpp"\r
-#include "vdma/continuous_buffer.hpp"\r
-#include "vdma/sg_buffer.hpp"\r
-#include "common/utils.hpp"\r
-\r
-namespace hailort\r
-{\r
-\r
-\r
-Expected<DdrChannelsPair> DdrChannelsPair::create(HailoRTDriver &driver, const DdrChannelsInfo &ddr_channels_info)\r
-{\r
- auto buffer_exp = should_use_ccb(driver) ?\r
- create_ccb_buffer(driver, ddr_channels_info.row_size, ddr_channels_info.min_buffered_rows) :\r
- create_sg_buffer(driver, ddr_channels_info.row_size, ddr_channels_info.min_buffered_rows);\r
- CHECK_EXPECTED(buffer_exp);\r
- auto buffer_ptr = buffer_exp.release();\r
-\r
- CHECK_AS_EXPECTED(0 == (ddr_channels_info.row_size % buffer_ptr->desc_page_size()), HAILO_INTERNAL_FAILURE,\r
- "DDR channel buffer row size must be a multiple of descriptor page size");\r
-\r
- const auto interrupts_domain = VdmaInterruptsDomain::NONE;\r
- const auto total_size = buffer_ptr->descs_count() * buffer_ptr->desc_page_size();\r
- auto desc_count_local = buffer_ptr->program_descriptors(total_size, interrupts_domain, interrupts_domain, 0, true);\r
- CHECK_EXPECTED(desc_count_local);\r
-\r
- return DdrChannelsPair(std::move(buffer_ptr), ddr_channels_info);\r
-}\r
-\r
-uint16_t DdrChannelsPair::descs_count() const\r
-{\r
- assert(IS_FIT_IN_UINT16(m_buffer->descs_count()));\r
- return static_cast<uint16_t>(m_buffer->descs_count());\r
-}\r
-\r
-uint32_t DdrChannelsPair::descriptors_per_frame() const\r
-{\r
- return (m_info.row_size / m_buffer->desc_page_size()) * m_info.total_buffers_per_frame;\r
-}\r
-\r
-Expected<Buffer> DdrChannelsPair::read() const\r
-{\r
- const auto size = m_buffer->size();\r
- auto res = Buffer::create(size);\r
- CHECK_EXPECTED(res);\r
-\r
- auto status = m_buffer->read(res->data(), size, 0);\r
- CHECK_SUCCESS_AS_EXPECTED(status);\r
-\r
- return res.release();\r
-}\r
-\r
-const DdrChannelsInfo& DdrChannelsPair::info() const\r
-{\r
- return m_info;\r
-}\r
-\r
-\r
-bool DdrChannelsPair::need_manual_credit_management() const\r
-{\r
- // On scatter gather manual credit management is needed\r
- return m_buffer->type() == vdma::VdmaBuffer::Type::SCATTER_GATHER;\r
-}\r
-\r
-CONTROL_PROTOCOL__host_buffer_info_t DdrChannelsPair::get_host_buffer_info() const\r
-{\r
- return m_buffer->get_host_buffer_info(m_info.row_size);\r
-}\r
-\r
-Expected<std::unique_ptr<vdma::VdmaBuffer>> DdrChannelsPair::create_sg_buffer(HailoRTDriver &driver,\r
- uint32_t row_size, uint16_t buffered_rows)\r
-{\r
- auto desc_sizes_pair = VdmaDescriptorList::get_desc_buffer_sizes_for_single_transfer(driver,\r
- buffered_rows, buffered_rows, row_size);\r
- CHECK_EXPECTED(desc_sizes_pair);\r
- auto desc_page_size = desc_sizes_pair->first;\r
- auto descs_count = desc_sizes_pair->second;\r
-\r
- auto buffer = vdma::SgBuffer::create(driver, descs_count, desc_page_size,\r
- HailoRTDriver::DmaDirection::BOTH);\r
- CHECK_EXPECTED(buffer);\r
-\r
- auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());\r
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);\r
-\r
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));\r
-}\r
-\r
-DdrChannelsPair::DdrChannelsPair(std::unique_ptr<vdma::VdmaBuffer> &&buffer, const DdrChannelsInfo &ddr_channels_info) :\r
- m_buffer(std::move(buffer)),\r
- m_info(ddr_channels_info)\r
-{}\r
-\r
-Expected<std::unique_ptr<vdma::VdmaBuffer>> DdrChannelsPair::create_ccb_buffer(HailoRTDriver &driver,\r
- uint32_t row_size, uint16_t buffered_rows)\r
-{\r
- // The first 12 channels in D2H CCB ("regular channels") requires that the amount of descriptors will be a power\r
- // of 2. Altough the 4 last channels ("enhanced channels") don't have this requirements, we keep the code the same.\r
- auto buffer_size = vdma::ContinuousBuffer::get_buffer_size_desc_power2(row_size * buffered_rows);\r
- auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);\r
- CHECK_EXPECTED(buffer);\r
-\r
- auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());\r
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);\r
-\r
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));\r
-}\r
-\r
-bool DdrChannelsPair::should_use_ccb(HailoRTDriver &driver)\r
-{\r
- switch (driver.dma_type()) {\r
- case HailoRTDriver::DmaType::PCIE:\r
- return false;\r
- case HailoRTDriver::DmaType::DRAM:\r
- return true;\r
- }\r
-\r
-\r
- // Shouldn't reach here\r
- assert(false);\r
- return false;\r
-}\r
-\r
-} /* namespace hailort */\r
+++ /dev/null
-/**\r
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
-**/\r
-/**\r
- * @file ddr_channels_pair.hpp\r
- * @brief DDR channel pairs are pair of vdma channels used in the same context for skip-connection.\r
- **/\r
-\r
-#ifndef _HAILO_DDR_CHANNELS_PAIR_HPP_\r
-#define _HAILO_DDR_CHANNELS_PAIR_HPP_\r
-\r
-#include "hailo/hailort.h"\r
-#include "hailo/buffer.hpp"\r
-#include "vdma/vdma_buffer.hpp"\r
-\r
-namespace hailort\r
-{\r
-\r
-struct DdrChannelsInfo\r
-{\r
- vdma::ChannelId d2h_channel_id;\r
- uint8_t d2h_stream_index;\r
- vdma::ChannelId h2d_channel_id;\r
- uint8_t h2d_stream_index;\r
- uint8_t network_index;\r
- uint16_t row_size;\r
- uint16_t min_buffered_rows;\r
- // total_buffers_per_frame not same as core_buffer_per frame. \r
- //(In DDR core buffer per frame is 1). Used to calc total host descriptors_per_frame. \r
- uint16_t total_buffers_per_frame;\r
-};\r
-\r
-class DdrChannelsPair final\r
-{\r
-public:\r
- static Expected<DdrChannelsPair> create(HailoRTDriver &driver, const DdrChannelsInfo &ddr_channels_info);\r
-\r
- uint16_t descs_count() const;\r
- uint32_t descriptors_per_frame() const;\r
- Expected<Buffer> read() const;\r
- const DdrChannelsInfo & info() const;\r
-\r
- // Checks if the credits are automaticaly going from d2h channel to its h2d channel, or it needs to be done manually\r
- // (Using a fw task).\r
- bool need_manual_credit_management() const;\r
-\r
- CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;\r
-\r
-private:\r
- DdrChannelsPair(std::unique_ptr<vdma::VdmaBuffer> &&buffer, const DdrChannelsInfo &ddr_channels_info);\r
-\r
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,\r
- uint32_t row_size, uint16_t buffered_rows);\r
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,\r
- uint32_t row_size, uint16_t buffered_rows);\r
-\r
- static bool should_use_ccb(HailoRTDriver &driver);\r
-\r
- std::unique_ptr<vdma::VdmaBuffer> m_buffer;\r
- DdrChannelsInfo m_info;\r
-};\r
-\r
-} /* namespace hailort */\r
-\r
-#endif /* _HAILO_DDR_CHANNELS_PAIR_HPP_ */\r
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file device.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include <hailo/hailort.h>
-#include "hailo/device.hpp"
-#include "common/utils.hpp"
-#include "control.hpp"
-#include <memory>
-#include "byte_order.h"
-#include "firmware_header_utils.h"
-#include "control_protocol.h"
-#include "pcie_device.hpp"
-#include "eth_device.hpp"
-#include "core_device.hpp"
-
-#ifndef _MSC_VER
-#include <sys/utsname.h>
-#endif
-
-namespace hailort
-{
-
-#define WRITE_CHUNK_SIZE (1024)
-#define DEVICE_WORD_SIZE (4)
-
-Device::Device(Type type) :
- m_type(type),
- m_control_sequence(0),
- m_is_control_version_supported(false),
- m_device_architecture(HAILO_ARCH_MAX_ENUM)
-{
-#ifndef _MSC_VER
- struct utsname uname_data;
- if (-1 != uname(&uname_data)) {
- LOGGER__INFO("OS Version: {} {} {} {}", uname_data.sysname, uname_data.release,
- uname_data.version,uname_data.machine);
- } else {
- LOGGER__ERROR("uname failed (errno = {})", errno);
- }
-#endif
-}
-
-// Scans for available devices and returns their string device-ids.
-// If the integrated core device driver is loaded, only the core device id is
-// returned; otherwise all PCIe devices are enumerated.
-Expected<std::vector<std::string>> Device::scan()
-{
-    // TODO: HRT-7530 support both CORE and PCIE
-    if (CoreDevice::is_loaded()) {
-        return std::vector<std::string>{CoreDevice::DEVICE_ID};
-    }
-    else {
-        auto pcie_device_infos = PcieDevice::scan();
-        CHECK_EXPECTED(pcie_device_infos);
-
-        std::vector<std::string> results;
-        results.reserve(pcie_device_infos->size());
-
-        // NOTE(review): iterates by value; presumably hailo_pcie_device_info_t
-        // is a small POD so the copy is cheap - confirm.
-        for (const auto pcie_device_info : pcie_device_infos.release()) {
-            auto device_id = pcie_device_info_to_string(pcie_device_info);
-            CHECK_EXPECTED(device_id);
-            results.emplace_back(device_id.release());
-        }
-
-        return results;
-    }
-}
-
-// Scans for PCIe devices only (thin wrapper over PcieDevice::scan).
-Expected<std::vector<hailo_pcie_device_info_t>> Device::scan_pcie()
-{
-    return PcieDevice::scan();
-}
-
-// Scans for Ethernet devices on the given network interface, waiting up to
-// `timeout` for responses.
-Expected<std::vector<hailo_eth_device_info_t>> Device::scan_eth(const std::string &interface_name,
-    std::chrono::milliseconds timeout)
-{
-    return EthernetDevice::scan(interface_name, timeout);
-}
-
-// Scans for Ethernet devices by host address, waiting up to `timeout`.
-Expected<std::vector<hailo_eth_device_info_t>> Device::scan_eth_by_host_address(const std::string &host_address,
-    std::chrono::milliseconds timeout)
-{
-    return EthernetDevice::scan_by_host_address(host_address, timeout);
-}
-
-// Creates the single device present on the system; fails with
-// HAILO_INVALID_OPERATION when zero or multiple devices are found.
-Expected<std::unique_ptr<Device>> Device::create()
-{
-    auto device_ids = scan();
-    CHECK_EXPECTED(device_ids, "Failed scan devices");
-    CHECK_AS_EXPECTED(device_ids->size() == 1, HAILO_INVALID_OPERATION,
-        "Expected only 1 device on the system (found {}). Pass device_id to create a specific device", device_ids->size());
-
-    return Device::create(device_ids->at(0));
-}
-
-// Creates a device from a device-id string. The id format determines the
-// transport: the reserved core-device id, a parsable PCIe BDF string, or an
-// Ethernet address string; anything else is rejected.
-Expected<std::unique_ptr<Device>> Device::create(const std::string &device_id)
-{
-    // Parsing is used here for type-detection, so parse failures are expected
-    // and must not be logged as errors.
-    const bool DONT_LOG_ON_FAILURE = false;
-    if (CoreDevice::DEVICE_ID == device_id) {
-        return create_core();
-    }
-    else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
-        return create_pcie(pcie_info.release());
-    }
-    else if (auto eth_info = EthernetDevice::parse_eth_device_info(device_id, DONT_LOG_ON_FAILURE)) {
-        return create_eth(eth_info.release());
-    }
-    else {
-        LOGGER__ERROR("Invalid device id {}", device_id);
-        return make_unexpected(HAILO_INVALID_ARGUMENT);
-    }
-}
-
-// Creates the first available PCIe device.
-Expected<std::unique_ptr<Device>> Device::create_pcie()
-{
-    auto pcie_device = PcieDevice::create();
-    CHECK_EXPECTED(pcie_device);
-    // Upcasting to Device unique_ptr (from PcieDevice unique_ptr)
-    auto device = std::unique_ptr<Device>(pcie_device.release());
-    return device;
-}
-
-// Creates a specific PCIe device from its device info (e.g. from scan_pcie).
-Expected<std::unique_ptr<Device>> Device::create_pcie(const hailo_pcie_device_info_t &device_info)
-{
-    auto pcie_device = PcieDevice::create(device_info);
-    CHECK_EXPECTED(pcie_device);
-    // Upcasting to Device unique_ptr (from PcieDevice unique_ptr)
-    auto device = std::unique_ptr<Device>(pcie_device.release());
-    return device;
-}
-
-// Creates an Ethernet device from its device info (e.g. from scan_eth).
-Expected<std::unique_ptr<Device>> Device::create_eth(const hailo_eth_device_info_t &device_info)
-{
-    auto eth_device = EthernetDevice::create(device_info);
-    CHECK_EXPECTED(eth_device);
-    // Upcasting to Device unique_ptr (from EthernetDevice unique_ptr)
-    auto device = std::unique_ptr<Device>(eth_device.release());
-    return device;
-}
-
-// Creates an Ethernet device from an IP address string.
-Expected<std::unique_ptr<Device>> Device::create_eth(const std::string &ip_addr)
-{
-    auto eth_device = EthernetDevice::create(ip_addr);
-    CHECK_EXPECTED(eth_device);
-    // Upcasting to Device unique_ptr (from EthernetDevice unique_ptr)
-    auto device = std::unique_ptr<Device>(eth_device.release());
-    return device;
-}
-
-// Parses a PCIe device-info string; logs on failure (unlike the internal
-// type-detection paths above, a failure here is a caller error).
-Expected<hailo_pcie_device_info_t> Device::parse_pcie_device_info(const std::string &device_info_str)
-{
-    const bool LOG_ON_FAILURE = true;
-    return PcieDevice::parse_pcie_device_info(device_info_str, LOG_ON_FAILURE);
-}
-
-// Formats PCIe device info back into its canonical string representation.
-Expected<std::string> Device::pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info)
-{
-    return PcieDevice::pcie_device_info_to_string(device_info);
-}
-
-// Classifies a device-id string as CORE / PCIE / ETH using the same
-// detection order as create(device_id).
-Expected<Device::Type> Device::get_device_type(const std::string &device_id)
-{
-    const bool DONT_LOG_ON_FAILURE = false;
-    if (CoreDevice::DEVICE_ID == device_id) {
-        return Type::CORE;
-    }
-    else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
-        return Type::PCIE;
-    }
-    else if (auto eth_info = EthernetDevice::parse_eth_device_info(device_id, DONT_LOG_ON_FAILURE)) {
-        return Type::ETH;
-    }
-    else {
-        LOGGER__ERROR("Invalid device id {}", device_id);
-        return make_unexpected(HAILO_INVALID_ARGUMENT);
-    }
-}
-
-// Returns the current control-protocol sequence number (incremented on every
-// fw_interact call).
-uint32_t Device::get_control_sequence()
-{
-    return m_control_sequence;
-}
-
-// True when the device FW major.minor matches the host library's supported
-// control version (set by update_fw_state()).
-bool Device::is_control_version_supported()
-{
-    return m_is_control_version_supported;
-}
-
-// Returns the transport type this device was created with.
-Device::Type Device::get_type() const
-{
-    return m_type;
-}
-
-// Maps the device transport type to its default stream interface.
-Expected<hailo_stream_interface_t> Device::get_default_streams_interface() const
-{
-    switch(m_type) {
-    case Type::PCIE:
-        return HAILO_STREAM_INTERFACE_PCIE;
-    case Type::CORE:
-        return HAILO_STREAM_INTERFACE_CORE;
-    case Type::ETH:
-        return HAILO_STREAM_INTERFACE_ETH;
-    default:
-        LOGGER__ERROR("Failed to get default streams interface.");
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
-    }
-}
-
-// Sets the FW logger verbosity level and the interfaces it logs to.
-hailo_status Device::set_fw_logger(hailo_fw_logger_level_t level, uint32_t interface_mask)
-{
-    return Control::set_fw_logger(*this, level, interface_mask);
-}
-
-// Enables/disables clock throttling on the device.
-hailo_status Device::set_throttling_state(bool should_activate)
-{
-    return Control::set_throttling_state(*this, should_activate);
-}
-
-// Queries whether clock throttling is currently active.
-Expected<bool> Device::get_throttling_state()
-{
-    return Control::get_throttling_state(*this);
-}
-
-// Writes `data` to device memory at `address` via the control protocol.
-hailo_status Device::write_memory(uint32_t address, const MemoryView &data)
-{
-    return Control::write_memory(*this, address, data.data(), static_cast<uint32_t>(data.size()));
-}
-
-// Reads device memory at `address` into `data` via the control protocol.
-hailo_status Device::read_memory(uint32_t address, MemoryView &data)
-{
-    return Control::read_memory(*this, address, data.data(), static_cast<uint32_t>(data.size()));
-}
-
-// Enables the watchdog for the given CPU.
-// NOTE(review): the static_cast to hailo_status looks redundant if
-// Control::wd_enable already returns hailo_status (wd_disable below omits
-// it) - confirm and unify.
-hailo_status Device::wd_enable(hailo_cpu_id_t cpu_id)
-{
-    return static_cast<hailo_status>(Control::wd_enable(*this, static_cast<uint8_t>(cpu_id), true));
-}
-
-// Disables the watchdog for the given CPU.
-hailo_status Device::wd_disable(hailo_cpu_id_t cpu_id)
-{
-    return Control::wd_enable(*this, static_cast<uint8_t>(cpu_id), false);
-}
-
-// Configures the watchdog of the given CPU: cycle count and mode
-// (HW+SW or HW-only). Rejects unknown modes with HAILO_INVALID_ARGUMENT.
-hailo_status Device::wd_config(hailo_cpu_id_t cpu_id, uint32_t wd_cycles, hailo_watchdog_mode_t wd_mode)
-{
-    CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_type = CONTROL_PROTOCOL__WATCHDOG_NUM_MODES; // set invalid value
-    switch(wd_mode) {
-    case HAILO_WATCHDOG_MODE_HW_SW:
-        wd_type = CONTROL_PROTOCOL__WATCHDOG_MODE_HW_SW;
-        break;
-    case HAILO_WATCHDOG_MODE_HW_ONLY:
-        wd_type = CONTROL_PROTOCOL__WATCHDOG_MODE_HW_ONLY;
-        break;
-    default:
-        LOGGER__ERROR("Invalid wd_mode");
-        return HAILO_INVALID_ARGUMENT;
-    }
-    return Control::wd_config(*this, static_cast<uint8_t>(cpu_id), wd_cycles, wd_type);
-}
-
-// Returns the system state recorded before the last reset of the given CPU.
-Expected<uint32_t> Device::previous_system_state(hailo_cpu_id_t cpu_id)
-{
-    CONTROL_PROTOCOL__system_state_t res = {};
-    auto status = Control::previous_system_state(*this, static_cast<uint8_t>(cpu_id), &res);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return res;
-}
-
-// Enables/disables Ethernet RX pause frames.
-hailo_status Device::set_pause_frames(bool rx_pause_frames_enable)
-{
-    return Control::set_pause_frames(*this, rx_pause_frames_enable);
-}
-
-// Reads `data.size()` bytes from an I2C slave register into `data`.
-hailo_status Device::i2c_read(const hailo_i2c_slave_config_t &slave_config, uint32_t register_address, MemoryView &data)
-{
-    return Control::i2c_read(*this, &slave_config, register_address, data.data(), static_cast<uint32_t>(data.size()));
-}
-
-// Writes `data` to an I2C slave register.
-hailo_status Device::i2c_write(const hailo_i2c_slave_config_t &slave_config, uint32_t register_address, const MemoryView &data)
-{
-    return Control::i2c_write(*this, &slave_config, register_address, data.data(), static_cast<uint32_t>(data.size()));
-}
-
-// Performs a single (blocking) power measurement on the given DVM and
-// measurement type; returns the measured value.
-Expected<float32_t> Device::power_measurement(hailo_dvm_options_t dvm, hailo_power_measurement_types_t measurement_type)
-{
-    float32_t res = 0;
-    auto status = Control::power_measurement(*this, static_cast<CONTROL_PROTOCOL__dvm_options_t>(dvm),
-        static_cast<CONTROL_PROTOCOL__power_measurement_types_t>(measurement_type), &res);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return res;
-}
-
-// Starts continuous power measurement with the given averaging factor and
-// sampling period.
-hailo_status Device::start_power_measurement(hailo_averaging_factor_t averaging_factor, hailo_sampling_period_t sampling_period)
-{
-    return Control::start_power_measurement(*this, static_cast<CONTROL_PROTOCOL__averaging_factor_t>(averaging_factor),
-        static_cast<CONTROL_PROTOCOL__sampling_period_t>(sampling_period));
-}
-
-// Configures which DVM/measurement type is sampled into the given buffer slot.
-hailo_status Device::set_power_measurement(hailo_measurement_buffer_index_t buffer_index, hailo_dvm_options_t dvm, hailo_power_measurement_types_t measurement_type)
-{
-    return Control::set_power_measurement(*this, buffer_index, static_cast<CONTROL_PROTOCOL__dvm_options_t>(dvm), static_cast<CONTROL_PROTOCOL__power_measurement_types_t>(measurement_type));
-}
-
-// Fetches accumulated measurement data from the given buffer slot,
-// optionally clearing it on the device.
-Expected<hailo_power_measurement_data_t> Device::get_power_measurement(hailo_measurement_buffer_index_t buffer_index, bool should_clear)
-{
-    hailo_power_measurement_data_t measurement_data = {};
-    auto status = Control::get_power_measurement(*this, buffer_index, should_clear, &measurement_data);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return measurement_data;
-}
-
-// Stops the continuous power measurement started by start_power_measurement.
-hailo_status Device::stop_power_measurement()
-{
-    return Control::stop_power_measurement(*this);
-}
-
-// Reads the chip temperature sensors' info from the device.
-Expected<hailo_chip_temperature_info_t> Device::get_chip_temperature()
-{
-    hailo_chip_temperature_info_t res = {};
-    auto status = Control::get_chip_temperature(*this, &res);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return res;
-}
-
-// Triggers the device's built-in memory self-test.
-hailo_status Device::test_chip_memories()
-{
-    return Control::test_chip_memories(*this);
-}
-
-// Moves the device into/out of the requested sleep state.
-hailo_status Device::set_sleep_state(hailo_sleep_state_t sleep_state)
-{
-    return Control::set_sleep_state(*this, sleep_state);
-}
-
-// Default implementation: direct (non-control-protocol) memory writes are
-// transport-specific; subclasses that support them override this.
-hailo_status Device::direct_write_memory(uint32_t address, const void *buffer, uint32_t size)
-{
-    (void) address;
-    (void) buffer;
-    (void) size;
-    return HAILO_NOT_IMPLEMENTED;
-}
-
-// Default implementation: direct memory reads are transport-specific;
-// subclasses that support them override this.
-hailo_status Device::direct_read_memory(uint32_t address, void *buffer, uint32_t size)
-{
-    (void) address;
-    (void) buffer;
-    (void) size;
-    return HAILO_NOT_IMPLEMENTED;
-}
-
-// Sends an identify control and returns the parsed board information.
-Expected<hailo_device_identity_t> Device::identify()
-{
-    return Control::identify(*this);
-}
-
-// Sends a core-CPU identify control and returns the parsed core information.
-Expected<hailo_core_information_t> Device::core_identify()
-{
-    hailo_core_information_t res = {};
-    auto status = Control::core_identify(*this, &res);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return res;
-}
-
-// Returns extended device information (supported features, SoC id, etc.).
-Expected<hailo_extended_device_information_t> Device::get_extended_device_information()
-{
-    return Control::get_extended_device_information(*this);
-}
-
-// Note: This function needs to be called after each reset/fw_update if we want the device's
-// state to remain valid after these ops (see HRT-3116)
-// Refreshes cached FW-dependent state: queries identify, records whether the
-// device FW major.minor matches the host library's, and caches the device
-// architecture. A version mismatch is logged as a warning, not an error.
-hailo_status Device::update_fw_state()
-{
-    // Assuming FW is loaded, send identify
-    auto board_info_expected = Control::identify(*this);
-    CHECK_EXPECTED_AS_STATUS(board_info_expected);
-    hailo_device_identity_t board_info = board_info_expected.release();
-
-    if ((FIRMWARE_VERSION_MAJOR == board_info.fw_version.major) &&
-        (FIRMWARE_VERSION_MINOR == board_info.fw_version.minor)) {
-        m_is_control_version_supported = true;
-    } else {
-        LOGGER__WARNING("Unsupported firmware operation. Host: {}.{}.{}, Device: {}.{}.{}{}",
-            FIRMWARE_VERSION_MAJOR,
-            FIRMWARE_VERSION_MINOR,
-            FIRMWARE_VERSION_REVISION,
-            board_info.fw_version.major,
-            board_info.fw_version.minor,
-            board_info.fw_version.revision,
-            DEV_STRING_NOTE(board_info.is_release));
-        m_is_control_version_supported = false;
-    }
-    m_device_architecture = board_info.device_architecture;
-
-    return HAILO_SUCCESS;
-}
-
-/**
- * Sends a raw control request to the firmware and receives its response.
- *
- * Rejects non-critical opcodes when the device FW version is unsupported
- * (m_is_control_version_supported is false), routes the request to the CPU
- * that owns the opcode, and always advances the control sequence counter -
- * even on failure - so host and device stay in sync.
- *
- * Fix: the unsupported-version error message read "FW version in not
- * supported"; corrected to "is not supported", and the already-decoded
- * `opcode` local is logged instead of decoding the header a second time.
- */
-hailo_status Device::fw_interact(uint8_t *request_buffer, size_t request_size,
-    uint8_t *response_buffer, size_t *response_size)
-{
-    hailo_status status = HAILO_UNINITIALIZED;
-    CONTROL_PROTOCOL__request_t *request = (CONTROL_PROTOCOL__request_t *)(request_buffer);
-    uint32_t opcode = HAILO_CONTROL_OPCODE_COUNT;
-    ASSERT(NULL != request_buffer);
-    ASSERT(NULL != response_buffer);
-    hailo_cpu_id_t cpu_id;
-
-    opcode = BYTE_ORDER__ntohl(request->header.common_header.opcode);
-    /* Make sure that the version is supported or the opcode is critical */
-    // NOTE(review): opcode indexes g_CONTROL_PROTOCOL__is_critical /
-    // g_CONTROL_PROTOCOL__cpu_id without a range check; the request is
-    // host-built so this is presumably always valid - confirm.
-    if (!m_is_control_version_supported &&
-        !g_CONTROL_PROTOCOL__is_critical[opcode]){
-        LOGGER__ERROR(
-            "Operation {} is not allowed when FW version is not supported. Host supported FW version is {}.{}.{}",
-            opcode,
-            FIRMWARE_VERSION_MAJOR, FIRMWARE_VERSION_MINOR, FIRMWARE_VERSION_REVISION
-        );
-        return HAILO_UNSUPPORTED_FW_VERSION;
-    }
-    /* Get the CPU ID */
-    cpu_id = (hailo_cpu_id_t)g_CONTROL_PROTOCOL__cpu_id[opcode];
-
-    status = this->fw_interact_impl(request_buffer, request_size, response_buffer, response_size, cpu_id);
-
-    // Always increment sequence
-    this->increment_control_sequence();
-    // Check this->fw_interact_impl
-    CHECK_SUCCESS(status);
-
-    return HAILO_SUCCESS;
-}
-
-// Enables/disables overcurrent protection on the device.
-hailo_status Device::set_overcurrent_state(bool should_activate)
-{
-    return Control::set_overcurrent_state(*this, should_activate);
-}
-
-// Queries whether overcurrent protection is currently active.
-Expected<bool> Device::get_overcurrent_state()
-{
-    return Control::get_overcurrent_state(*this);
-}
-
-// Reads the device health information (throttling, temperature zones, etc.).
-Expected<hailo_health_info_t> Device::get_health_information()
-{
-    return Control::get_health_information(*this);
-}
-
-// Returns, per configured network group, its number of dynamic contexts.
-// Fails if any count does not fit in a uint8_t.
-Expected<std::vector<uint8_t>> Device::get_number_of_dynamic_contexts_per_network_group()
-{
-    CONTROL_PROTOCOL__context_switch_main_header_t context_switch_main_header{};
-    const auto status = Control::get_context_switch_main_header(*this, &context_switch_main_header);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-
-    std::vector<uint8_t> number_of_contexts_per_network_group;
-    for (auto network_group_index = 0; network_group_index < context_switch_main_header.application_count; network_group_index++) {
-        const uint32_t num_contexts = context_switch_main_header.application_header[network_group_index].dynamic_contexts_count;
-        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(num_contexts), HAILO_INTERNAL_FAILURE, "num_contexts must fit in one byte");
-        number_of_contexts_per_network_group.emplace_back(static_cast<uint8_t>(num_contexts));
-    }
-
-    return number_of_contexts_per_network_group;
-}
-
-/**
- * Downloads a single context's action list from the device.
- *
- * Allocates a max_size scratch buffer, asks the FW to fill it, then returns
- * a right-sized copy. `base_address` and `batch_counter` are out-params
- * filled from the FW response; both must be non-null.
- *
- * Fix: the second allocation (`final_action_list`) was validated by
- * re-checking `action_list` - an allocation failure of the truncated copy
- * went unnoticed. Now the correct Expected is checked.
- */
-Expected<Buffer> Device::download_context_action_list(uint32_t network_group_id, uint8_t context_type,
-    uint8_t context_index, uint32_t *base_address, uint32_t *batch_counter, uint16_t max_size)
-{
-    CHECK_ARG_NOT_NULL_AS_EXPECTED(base_address);
-    CHECK_ARG_NOT_NULL_AS_EXPECTED(batch_counter);
-
-    // Allocate room for an action list of at most max_size bytes
-    auto action_list = Buffer::create(max_size);
-    CHECK_EXPECTED(action_list);
-
-    uint32_t base_address_local = 0;
-    uint32_t batch_counter_local = 0;
-    uint16_t actual_size = 0;
-    const auto status = Control::download_context_action_list(*this, network_group_id,
-        (CONTROL_PROTOCOL__context_switch_context_type_t)context_type, context_index, action_list->size(),
-        &base_address_local, action_list->data(), &actual_size, &batch_counter_local);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    CHECK_AS_EXPECTED(actual_size <= max_size, HAILO_INTERNAL_FAILURE);
-
-    // Create a copy of the list, truncating to the needed size
-    auto final_action_list = Buffer::create(action_list->data(), actual_size);
-    CHECK_EXPECTED(final_action_list);
-
-    // Transfer ownership of out params
-    *base_address = base_address_local;
-    *batch_counter = batch_counter_local;
-
-    return final_action_list.release();
-}
-
-// Enables action-list timestamping for the given batch index (user-config
-// mode is always enabled here).
-hailo_status Device::set_context_action_list_timestamp_batch(uint16_t batch_index)
-{
-    static const bool ENABLE_USER_CONFIG = true;
-    return Control::config_context_switch_timestamp(*this, batch_index, ENABLE_USER_CONFIG);
-}
-
-/**
- * Installs a context-switch breakpoint. Each `break_at_any_*` flag widens the
- * match to every value of its dimension; otherwise the paired index must
- * match exactly.
- *
- * Fix: the failure message said "in continue breakpoint" (copy-paste from
- * continue_context_switch_breakpoint); corrected to "in set breakpoint".
- */
-hailo_status Device::set_context_switch_breakpoint(uint8_t breakpoint_id, bool break_at_any_network_group_index,
-    uint8_t network_group_index, bool break_at_any_batch_index, uint16_t batch_index, bool break_at_any_context_index,
-    uint8_t context_index, bool break_at_any_action_index, uint16_t action_index)
-{
-    CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {
-        break_at_any_network_group_index,
-        network_group_index,
-        break_at_any_batch_index,
-        batch_index,
-        break_at_any_context_index,
-        context_index,
-        break_at_any_action_index,
-        action_index};
-
-    auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
-        CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_SET, &breakpoint_data);
-    CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in set breakpoint");
-
-    return HAILO_SUCCESS;
-}
-
-// Resumes execution after the given breakpoint was hit. The breakpoint data
-// payload is unused for CONTINUE, so an all-zero struct is sent.
-hailo_status Device::continue_context_switch_breakpoint(uint8_t breakpoint_id)
-{
-    CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {false, 0, false, 0, false, 0, false, 0};
-
-    auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
-        CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_CONTINUE, &breakpoint_data);
-    CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in continue breakpoint");
-
-    return HAILO_SUCCESS;
-}
-
-// Removes the given breakpoint. As with CONTINUE, the data payload is unused.
-hailo_status Device::clear_context_switch_breakpoint(uint8_t breakpoint_id)
-{
-    CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {false, 0, false, 0, false, 0, false, 0};
-
-    auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
-        CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_CLEAR, &breakpoint_data);
-    CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in clear breakpoint");
-
-    return HAILO_SUCCESS;
-}
-
-// Queries the debug-system status of the given breakpoint; the enum value is
-// narrowed to uint8_t for the public API.
-Expected<uint8_t> Device::get_context_switch_breakpoint_status(uint8_t breakpoint_id)
-{
-    CONTROL_PROTOCOL__context_switch_debug_sys_status_t breakpoint_status =
-        CONTROL_PROTOCOL__CONTEXT_SWITCH_DEBUG_SYS_STATUS_COUNT;
-
-    auto status = Control::get_context_switch_breakpoint_status(*this, breakpoint_id,
-        &breakpoint_status);
-    CHECK_SUCCESS_AS_EXPECTED(status, "Failed getting context switch breakpoint");
-
-    return static_cast<uint8_t>(breakpoint_status);
-}
-
-// Creates the integrated core device.
-Expected<std::unique_ptr<Device>> Device::create_core()
-{
-    auto core_device = CoreDevice::create();
-    CHECK_EXPECTED(core_device);
-    // Upcasting to Device unique_ptr (from CoreDevice unique_ptr)
-    auto device = std::unique_ptr<Device>(core_device.release());
-    return device;
-}
-
-} /* namespace hailort */
--- /dev/null
+# Build listing for the device_common module: collects its sources and
+# exports them to the parent scope for the main HailoRT target.
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/device_internal.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/d2h_events_parser.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/control.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/control_protocol.cpp
+)
+
+# Append this module's sources to the accumulated list in the parent scope.
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file control.cpp
+ * @brief Implements module which allows controling Hailo chip.
+ **/
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "device_common/control.hpp"
+#include "hw_consts.hpp"
+
+#include "control_protocol.h"
+#include "byte_order.h"
+#include "firmware_status.h"
+#include "firmware_header_utils.h"
+#include "d2h_events.h"
+#include <array>
+
+
+namespace hailort
+{
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+#define POWER_MEASUREMENT_DELAY_MS(__sample_period, __average_factor) \
+ (static_cast<uint32_t>((__sample_period) / 1000.0 * (__average_factor) * 2 * 1.2))
+
+#define OVERCURRENT_PROTECTION_WARNING ( \
+ "Using the overcurrent protection dvm for power measurement will disable the overcurrent protection.\n" \
+ "If only taking one measurement, the protection will resume automatically.\n" \
+ "If doing continuous measurement, to enable overcurrent protection again you have to stop the power measurement on this dvm." \
+ )
+
+typedef std::array<std::array<float64_t, CONTROL_PROTOCOL__POWER_MEASUREMENT_TYPES__COUNT>, CONTROL_PROTOCOL__DVM_OPTIONS_COUNT> power_conversion_multiplier_t;
+
+
+/**
+ * Parses a raw identify response from the FW into hailo_device_identity_t.
+ *
+ * Byte-order converts every wire field, copies the variable-length strings,
+ * derives release/extended-buffer flags from the revision bits, and verifies
+ * the response came from the app CPU (core responses set
+ * REVISION_APP_CORE_FLAG_BIT_MASK). Returns HAILO_INVALID_ARGUMENT on a null
+ * response and HAILO_INVALID_FIRMWARE on a mis-marked FW.
+ *
+ * Fix: error-message typo "unmaked" -> "unmasked" (the logged revision still
+ * carries its flag bits; they are masked off just below).
+ */
+Expected<hailo_device_identity_t> control__parse_identify_results(CONTROL_PROTOCOL_identify_response_t *identify_response)
+{
+    hailo_device_identity_t board_info;
+
+    CHECK_AS_EXPECTED(nullptr != identify_response, HAILO_INVALID_ARGUMENT);
+
+    // Store identify response inside control
+    board_info.protocol_version = BYTE_ORDER__ntohl(identify_response->protocol_version);
+    board_info.logger_version = BYTE_ORDER__ntohl(identify_response->logger_version);
+    (void)memcpy(&(board_info.fw_version),
+        &(identify_response->fw_version),
+        sizeof(board_info.fw_version));
+    // NOTE(review): the *_length fields come from the device response and are
+    // used as memcpy sizes without validating them against the destination
+    // buffers - presumably bounded by the protocol; confirm.
+    board_info.board_name_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->board_name_length);
+    (void)memcpy(&(board_info.board_name),
+        &(identify_response->board_name),
+        BYTE_ORDER__ntohl(identify_response->board_name_length));
+    board_info.serial_number_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->serial_number_length);
+    (void)memcpy(&(board_info.serial_number),
+        &(identify_response->serial_number),
+        BYTE_ORDER__ntohl(identify_response->serial_number_length));
+    board_info.part_number_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->part_number_length);
+    (void)memcpy(&(board_info.part_number),
+        &(identify_response->part_number),
+        BYTE_ORDER__ntohl(identify_response->part_number_length));
+    board_info.product_name_length = (uint8_t)BYTE_ORDER__ntohl(identify_response->product_name_length);
+    (void)memcpy(&(board_info.product_name),
+        &(identify_response->product_name),
+        BYTE_ORDER__ntohl(identify_response->product_name_length));
+
+    // Check if the firmware is debug or release
+    board_info.is_release = (!IS_REVISION_DEV(board_info.fw_version.revision));
+
+    // Check if the firmware was compiled with EXTENDED_CONTEXT_SWITCH_BUFFER
+    board_info.extended_context_switch_buffer = IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(board_info.fw_version.revision);
+
+    // Make sure response was from app CPU
+    CHECK_AS_EXPECTED((0 == (board_info.fw_version.revision & REVISION_APP_CORE_FLAG_BIT_MASK)), HAILO_INVALID_FIRMWARE,
+        "Got invalid app FW type, which means the FW was not marked correctly. unmasked FW revision {}", board_info.fw_version.revision);
+
+    // Keep the revision number only
+    board_info.fw_version.revision = GET_REVISION_NUMBER_VALUE(board_info.fw_version.revision);
+
+    board_info.device_architecture = static_cast<hailo_device_architecture_t>(BYTE_ORDER__ntohl(identify_response->device_architecture));
+
+    /* Write identify results to log */
+    LOGGER__INFO("firmware_version is: {}.{}.{}",
+        board_info.fw_version.major,
+        board_info.fw_version.minor,
+        board_info.fw_version.revision
+    );
+    LOGGER__DEBUG("Protocol version: {}", board_info.protocol_version);
+    LOGGER__DEBUG("Logger version: {}", board_info.logger_version);
+    LOGGER__DEBUG("Device architecture code: {}", board_info.device_architecture);
+
+    return board_info;
+}
+
+// Parses the extended-device-information FW response into the public struct:
+// unpacks the supported-features bitfield, byte-order converts scalar fields,
+// and copies SoC id / MAC / fuse / PM blobs.
+Expected<hailo_extended_device_information_t> control__parse_get_extended_device_information_results
+    (CONTROL_PROTOCOL__get_extended_device_information_response_t &get_extended_device_information_response)
+{
+    uint8_t local_supported_features;
+    hailo_extended_device_information_t device_info;
+
+    // NOTE(review): the feature word is truncated to its low 8 bits -
+    // presumably all defined feature bits fit there; confirm.
+    local_supported_features = (uint8_t)BYTE_ORDER__ntohl(get_extended_device_information_response.supported_features);
+
+    device_info.supported_features.ethernet = (local_supported_features &
+        (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_ETHERNET_BIT_OFFSET)) != 0;
+    device_info.supported_features.pcie = (local_supported_features &
+        (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_PCIE_BIT_OFFSET)) != 0;
+    device_info.supported_features.mipi = (local_supported_features &
+        (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_MIPI_BIT_OFFSET)) != 0;
+    device_info.supported_features.current_monitoring = (local_supported_features &
+        (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_CURRENT_MONITORING_BIT_OFFSET)) != 0;
+    device_info.supported_features.mdio = (local_supported_features &
+        (1 << CONTROL_PROTOCOL__SUPPORTED_FEATURES_MDIO_BIT_OFFSET)) != 0;
+    device_info.neural_network_core_clock_rate = BYTE_ORDER__ntohl(get_extended_device_information_response.neural_network_core_clock_rate);
+
+    LOGGER__DEBUG("Max Neural Network Core Clock Rate: {}", device_info.neural_network_core_clock_rate);
+
+    device_info.boot_source = static_cast<hailo_device_boot_source_t>(
+        BYTE_ORDER__ntohl(get_extended_device_information_response.boot_source));
+
+    // NOTE(review): soc_id_length / eth_mac_length come from the response and
+    // are used as memcpy sizes without bounds checks - confirm they are
+    // protocol-bounded.
+    (void)memcpy(device_info.soc_id,
+        get_extended_device_information_response.soc_id,
+        BYTE_ORDER__ntohl(get_extended_device_information_response.soc_id_length));
+
+    device_info.lcs = get_extended_device_information_response.lcs;
+
+    memcpy(&device_info.unit_level_tracking_id[0], &get_extended_device_information_response.fuse_info, sizeof(device_info.unit_level_tracking_id));
+    memcpy(&device_info.eth_mac_address[0], &get_extended_device_information_response.eth_mac_address[0], BYTE_ORDER__ntohl(get_extended_device_information_response.eth_mac_length));
+    memcpy(&device_info.soc_pm_values, &get_extended_device_information_response.pd_info, sizeof(device_info.soc_pm_values));
+
+    return device_info;
+}
+
+// Parses the health-information FW response: copies throttling/overcurrent
+// flags as-is and byte-order converts the multi-byte threshold fields.
+Expected<hailo_health_info_t> control__parse_get_health_information_results
+    (CONTROL_PROTOCOL__get_health_information_response_t *get_health_information_response)
+{
+    hailo_health_info_t health_info;
+
+    CHECK_AS_EXPECTED(nullptr != get_health_information_response, HAILO_INVALID_ARGUMENT);
+
+    health_info.overcurrent_protection_active = get_health_information_response->overcurrent_protection_active;
+    health_info.current_overcurrent_zone = get_health_information_response->current_overcurrent_zone;
+    // Convert the wire value back to a float.
+    // NOTE(review): float32_t(ntohl(x)) converts the integer *value*, not the
+    // IEEE-754 bit pattern; if the FW sends raw float bits, a
+    // bit-reinterpretation would be needed instead - confirm.
+    health_info.red_overcurrent_threshold = float32_t(BYTE_ORDER__ntohl(get_health_information_response->red_overcurrent_threshold));
+    health_info.overcurrent_throttling_active = get_health_information_response->overcurrent_throttling_active;
+    health_info.temperature_throttling_active = get_health_information_response->temperature_throttling_active;
+    health_info.current_temperature_zone = get_health_information_response->current_temperature_zone;
+    health_info.current_temperature_throttling_level = get_health_information_response->current_temperature_throttling_level;
+    memcpy(&health_info.temperature_throttling_levels[0], &get_health_information_response->temperature_throttling_levels[0],
+        BYTE_ORDER__ntohl(get_health_information_response->temperature_throttling_levels_length));
+    health_info.orange_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->orange_temperature_threshold);
+    health_info.orange_hysteresis_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->orange_hysteresis_temperature_threshold);
+    health_info.red_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->red_temperature_threshold);
+    health_info.red_hysteresis_temperature_threshold = BYTE_ORDER__ntohl(get_health_information_response->red_hysteresis_temperature_threshold);
+    health_info.requested_overcurrent_clock_freq = BYTE_ORDER__ntohl(get_health_information_response->requested_overcurrent_clock_freq);
+    health_info.requested_temperature_clock_freq = BYTE_ORDER__ntohl(get_health_information_response->requested_temperature_clock_freq);
+    return health_info;
+}
+
+
+/**
+ * Parses a core-CPU identify response into hailo_core_information_t.
+ *
+ * Copies the FW version, derives release/extended-buffer flags from the
+ * revision bits, and verifies the response actually came from the core CPU
+ * (its revision must carry REVISION_APP_CORE_FLAG_BIT_MASK). Returns
+ * HAILO_INVALID_ARGUMENT on null args, HAILO_INVALID_FIRMWARE on a
+ * mis-marked FW.
+ *
+ * Fix: error-message typo "unmaked" -> "unmasked" (same wording fix as the
+ * app-CPU parser's message).
+ */
+hailo_status control__parse_core_identify_results(CONTROL_PROTOCOL__core_identify_response_t *identify_response,
+    hailo_core_information_t *core_info)
+{
+    CHECK_ARG_NOT_NULL(core_info);
+    CHECK_ARG_NOT_NULL(identify_response);
+
+    // Store identify response inside control
+    (void)memcpy(&(core_info->fw_version),
+        &(identify_response->fw_version),
+        sizeof(core_info->fw_version));
+
+    // Check if firmware is at debug/release
+    core_info->is_release = !(IS_REVISION_DEV(core_info->fw_version.revision));
+
+    // Check if the firmware was compiled with EXTENDED_CONTEXT_SWITCH_BUFFER
+    core_info->extended_context_switch_buffer = IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(core_info->fw_version.revision);
+
+    // Make sure response was from core CPU
+    CHECK((REVISION_APP_CORE_FLAG_BIT_MASK == (core_info->fw_version.revision & REVISION_APP_CORE_FLAG_BIT_MASK)), HAILO_INVALID_FIRMWARE,
+        "Got invalid core FW type, which means the FW was not marked correctly. unmasked FW revision {}", core_info->fw_version.revision);
+
+    // Keep the revision number only
+    core_info->fw_version.revision = GET_REVISION_NUMBER_VALUE(core_info->fw_version.revision);
+
+    // Write identify results to log
+    LOGGER__INFO("core firmware_version is: {}.{}.{}",
+        core_info->fw_version.major,
+        core_info->fw_version.minor,
+        core_info->fw_version.revision
+    );
+
+    return HAILO_SUCCESS;
+}
+
+// Returns HAILO_SUCCESS when the device's architecture is one of
+// `supported_archs`, HAILO_NOT_SUPPORTED (with an error log) otherwise.
+// Used to gate controls that only exist on certain chip generations.
+hailo_status Control::validate_arch_supported(Device &device, const std::vector<hailo_device_architecture_t> &supported_archs)
+{
+    auto dev_arch = device.get_architecture();
+    CHECK_EXPECTED_AS_STATUS(dev_arch);
+    for (const auto &arch : supported_archs) {
+        if (*dev_arch == arch) {
+            return HAILO_SUCCESS;
+        }
+    }
+    LOGGER__ERROR("Control is not supported for this device architecture - {}", HailoRTCommon::get_device_arch_str(*dev_arch));
+    return HAILO_NOT_SUPPORTED;
+}
+
+// Parses a raw FW response buffer and validates it against the request:
+//   1. structural parse (version check first - unsupported protocol versions
+//      get a dedicated status);
+//   2. FW major/minor status must both be zero - otherwise the textual status
+//      is logged and HAILO_FW_CONTROL_FAILURE (or HAILO_UNSUPPORTED_OPCODE)
+//      is returned;
+//   3. response opcode and protocol version must echo the request's.
+// On success, *header and *payload point into `message`; they share its
+// lifetime. Uses the C-style goto-exit pattern for a single return point.
+hailo_status Control::parse_and_validate_response(uint8_t *message, uint32_t message_size,
+    CONTROL_PROTOCOL__response_header_t **header, CONTROL_PROTOCOL__payload_t **payload,
+    CONTROL_PROTOCOL__request_t *request)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__status_t fw_status = {};
+    const char *firmware_status_text = NULL;
+
+    /* Parse the response */
+    common_status = CONTROL_PROTOCOL__parse_response(message, message_size, header, payload, &fw_status);
+    if (HAILO_STATUS__CONTROL_PROTOCOL__INVALID_VERSION == common_status) {
+        status = HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION;
+    }
+    else {
+        status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    }
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+    /* Validate response was successful - both major and minor should be error free */
+    if (0 != fw_status.major_status) {
+        status = HAILO_FW_CONTROL_FAILURE;
+        LOGGER__ERROR("Firmware control has failed. Major status: {:#x}, Minor status: {:#x}",
+            fw_status.major_status,
+            fw_status.minor_status);
+        // Best-effort: log the human-readable form of both status codes.
+        common_status = FIRMWARE_STATUS__get_textual((FIRMWARE_STATUS_t)fw_status.major_status, &firmware_status_text);
+        if (HAILO_COMMON_STATUS__SUCCESS == common_status) {
+            LOGGER__ERROR("Firmware major status: {}", firmware_status_text);
+        } else {
+            LOGGER__ERROR("Cannot find textual address for firmware status {:#x}, common_status = {}",
+                (FIRMWARE_STATUS_t)fw_status.major_status, common_status);
+        }
+        common_status = FIRMWARE_STATUS__get_textual((FIRMWARE_STATUS_t)fw_status.minor_status, &firmware_status_text);
+        if (HAILO_COMMON_STATUS__SUCCESS == common_status) {
+            LOGGER__ERROR("Firmware minor status: {}", firmware_status_text);
+        } else {
+            LOGGER__ERROR("Cannot find textual address for firmware status {:#x}, common_status = {}",
+                (FIRMWARE_STATUS_t)fw_status.minor_status, common_status);
+        }
+
+        // Unsupported opcode is reported with a dedicated status so callers
+        // can degrade gracefully on older FW.
+        if ((HAILO_CONTROL_STATUS_UNSUPPORTED_OPCODE == fw_status.minor_status) ||
+            (HAILO_CONTROL_STATUS_UNSUPPORTED_OPCODE == fw_status.major_status)) {
+            status = HAILO_UNSUPPORTED_OPCODE;
+            LOGGER__ERROR("Opcode {} is not supported",
+                CONTROL_PROTOCOL__get_textual_opcode((CONTROL_PROTOCOL__OPCODE_t)BYTE_ORDER__ntohl(request->header.common_header.opcode)));
+        }
+        goto exit;
+    }
+
+    /* Validate response opcode is same as request */
+    if (request->header.common_header.opcode != (*header)->common_header.opcode) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Invalid opcode received from FW");
+        goto exit;
+    }
+
+    /* Validate response version is same as request */
+    if (request->header.common_header.version != (*header)->common_header.version) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Invalid protocol version received from FW");
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+// Sends an IDENTIFY control to the app CPU and returns the parsed board
+// information. Packs the request, performs the transaction via
+// device.fw_interact(), validates the response, then delegates field parsing
+// to control__parse_identify_results().
+Expected<hailo_device_identity_t> Control::identify(Device &device)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL_identify_response_t *identify_response = NULL;
+
+    /* Validate arguments */
+    common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    identify_response = (CONTROL_PROTOCOL_identify_response_t *)(payload->parameters);
+
+    return control__parse_identify_results(identify_response);
+}
+
+hailo_status Control::core_identify(Device &device, hailo_core_information_t *core_info)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__core_identify_response_t *identify_response = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(core_info);
+
+ common_status = CONTROL_PROTOCOL__pack_core_identify_request(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ identify_response = (CONTROL_PROTOCOL__core_identify_response_t *)(payload->parameters);
+
+ /* Store results inside contol object */
+ status = control__parse_core_identify_results(identify_response, core_info);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+
+hailo_status Control::set_fw_logger(Device &device, hailo_fw_logger_level_t level, uint32_t interface_mask)
+{
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+
+ /* Validate arch */
+ auto status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ auto common_status = CONTROL_PROTOCOL__pack_set_fw_logger_request(&request, &request_size, device.get_control_sequence(), level,
+ static_cast<uint8_t>(interface_mask));
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = sizeof(response_buffer);
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::set_clock_freq(Device &device, uint32_t clock_freq)
+{
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+
+ /* Validate arch */
+ auto status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ auto common_status = CONTROL_PROTOCOL__pack_set_clock_freq_request(&request, &request_size, device.get_control_sequence(), clock_freq);
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = sizeof(response_buffer);
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::set_throttling_state(Device &device, bool should_activate)
+{
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+
+ /* Validate arch */
+ auto status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ auto common_status = CONTROL_PROTOCOL__pack_set_throttling_state_request(&request, &request_size, device.get_control_sequence(), should_activate);
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = sizeof(response_buffer);
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+Expected<bool> Control::get_throttling_state(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__get_throttling_state_response_t *get_throttling_state_response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ common_status = CONTROL_PROTOCOL__pack_get_throttling_state_request(&request, &request_size, device.get_control_sequence());
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ get_throttling_state_response = (CONTROL_PROTOCOL__get_throttling_state_response_t *)(payload->parameters);
+ return std::move(get_throttling_state_response->is_active);
+}
+
+hailo_status Control::set_overcurrent_state(Device &device, bool should_activate)
+{
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+
+ /* Validate arch */
+ auto status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ auto common_status = CONTROL_PROTOCOL__pack_set_overcurrent_state_request(&request, &request_size, device.get_control_sequence(), should_activate);
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = sizeof(response_buffer);
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+Expected<bool> Control::get_overcurrent_state(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__get_overcurrent_state_response_t *get_overcurrent_state_response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ common_status = CONTROL_PROTOCOL__pack_get_overcurrent_state_request(&request, &request_size, device.get_control_sequence());
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ get_overcurrent_state_response = (CONTROL_PROTOCOL__get_overcurrent_state_response_t *)(payload->parameters);
+ return std::move(get_overcurrent_state_response->is_required);
+}
+
+Expected<CONTROL_PROTOCOL__hw_consts_t> Control::get_hw_consts(Device &device)
+{
+ size_t request_size = 0;
+ CONTROL_PROTOCOL__request_t request = {};
+ auto common_status = CONTROL_PROTOCOL__pack_get_hw_consts_request(&request, &request_size, device.get_control_sequence());
+ auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ const auto &response = *reinterpret_cast<CONTROL_PROTOCOL__get_hw_consts_response_t*>(payload->parameters);
+ return Expected<CONTROL_PROTOCOL__hw_consts_t>(response.hw_consts);
+}
+
+hailo_status Control::write_memory_chunk(Device &device, uint32_t address, const uint8_t *data, uint32_t chunk_size)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arguments */
+ ASSERT(NULL != data);
+
+ /* Validate chunk size is valid */
+ ASSERT(CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE >= chunk_size);
+ ASSERT(0 != chunk_size);
+
+ common_status = CONTROL_PROTOCOL__pack_write_memory_request(&request, &request_size, device.get_control_sequence(), address, data, chunk_size);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::write_memory(Device &device, uint32_t address, const uint8_t *data, uint32_t data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ uint32_t current_write_address = address;
+ const uint8_t* current_data_address = data;
+ uint32_t chunk_size = CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE;
+ uint32_t number_of_chunks = data_length / chunk_size;
+ uint32_t data_chunk_leftover = data_length % chunk_size;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(data);
+
+ if (data_length >= chunk_size) {
+ for (size_t i = 0; i < number_of_chunks; i++ ) {
+ /* Write current memory chunk */
+ status = write_memory_chunk(device, current_write_address, current_data_address, chunk_size);
+ CHECK_SUCCESS(status);
+
+ current_write_address += chunk_size;
+ current_data_address += chunk_size;
+ }
+ }
+
+ if (data_chunk_leftover > 0) {
+ /* Write leftover */
+ status = write_memory_chunk(device, current_write_address, current_data_address, data_chunk_leftover);
+ CHECK_SUCCESS(status);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::read_memory_chunk(Device &device, uint32_t address, uint8_t *data, uint32_t chunk_size)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ uint32_t actual_read_data_length = 0;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__read_memory_response_t *read_memory_response = NULL;
+
+ /* Validate arguments */
+ ASSERT(NULL != data);
+
+ /* Validate chunk size is valid */
+ ASSERT(CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE >= chunk_size);
+ ASSERT(0 != chunk_size);
+
+ common_status = CONTROL_PROTOCOL__pack_read_memory_request(&request, &request_size, device.get_control_sequence(), address, chunk_size);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ read_memory_response = (CONTROL_PROTOCOL__read_memory_response_t *)(payload->parameters);
+ actual_read_data_length = BYTE_ORDER__ntohl(read_memory_response->data_length);
+ if (chunk_size != actual_read_data_length) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ LOGGER__ERROR("Did not read all data from control response");
+ goto exit;
+ }
+ (void)memcpy(data, &read_memory_response->data[0], actual_read_data_length);
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::read_memory(Device &device, uint32_t address, uint8_t *data, uint32_t data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ uint32_t current_read_address = address;
+ uint8_t* current_data_address = data;
+ uint32_t chunk_size = CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE;
+ uint32_t number_of_chunks = data_length / chunk_size;
+ uint32_t data_chunk_leftover = data_length % chunk_size;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(data);
+
+ if (data_length >= chunk_size) {
+ for (size_t i = 0; i < number_of_chunks; i++ ) {
+ /* Read current memory chunk */
+ status = read_memory_chunk(device, current_read_address, current_data_address, chunk_size);
+ CHECK_SUCCESS(status);
+
+ current_read_address += chunk_size;
+ current_data_address += chunk_size;
+ }
+ }
+
+ if (data_chunk_leftover > 0) {
+ /* Read leftover */
+ status = read_memory_chunk(device, current_read_address, current_data_address, data_chunk_leftover);
+ CHECK_SUCCESS(status);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::open_stream(Device &device, uint8_t dataflow_manager_id, bool is_input)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_open_stream_request(&request, &request_size, device.get_control_sequence(),
+ dataflow_manager_id, is_input);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::close_stream(Device &device, uint8_t dataflow_manager_id, bool is_input)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_close_stream_request(&request, &request_size, device.get_control_sequence(),
+ dataflow_manager_id, is_input);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::close_all_streams(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ /* Close all input streams */
+ status = close_stream(device, CONTROL_PROTOCOL__ALL_DATAFLOW_MANAGERS, true);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Close all output streams */
+ status = close_stream(device, CONTROL_PROTOCOL__ALL_DATAFLOW_MANAGERS, false);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_udp_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_udp_input_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_udp_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_udp_output_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_mipi_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_mipi_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_pcie_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_stream_pcie_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__config_stream_response_t *response = NULL;
+ uint32_t dataflow_manager_id_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(&request, &request_size,
+ device.get_control_sequence(), params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__config_stream_response_t *)(payload->parameters);
+ dataflow_manager_id_length = BYTE_ORDER__ntohl(response->dataflow_manager_id_length);
+
+ /* Validate read data is data size */
+ if (dataflow_manager_id_length != sizeof(response->dataflow_manager_id)) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ goto exit;
+ }
+
+ dataflow_manager_id = response->dataflow_manager_id;
+
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+// TODO: needed?
+hailo_status Control::power_measurement(Device &device, CONTROL_PROTOCOL__dvm_options_t dvm,
+ CONTROL_PROTOCOL__power_measurement_types_t measurement_type, float32_t *measurement)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__power_measurement_response_t *response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(measurement);
+
+ common_status = CONTROL_PROTOCOL__pack_power_measurement_request(&request, &request_size, device.get_control_sequence(),
+ dvm, measurement_type);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ response = (CONTROL_PROTOCOL__power_measurement_response_t*)(payload->parameters);
+
+ LOGGER__INFO("The chosen dvm type is: {}, and measurement type: {}", response->dvm,
+ response->measurement_type);
+ if (CONTROL_PROTOCOL__DVM_OPTIONS_OVERCURRENT_PROTECTION == response->dvm) {
+ LOGGER__WARN(OVERCURRENT_PROTECTION_WARNING);
+ }
+
+ *measurement = response->power_measurement;
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::set_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, CONTROL_PROTOCOL__dvm_options_t dvm,
+ CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__set_power_measurement_response_t *response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ CHECK(CONTROL_PROTOCOL__MAX_NUMBER_OF_POWER_MEASUREMETS > buffer_index,
+ HAILO_INVALID_ARGUMENT, "Invalid power measurement index {}", buffer_index);
+
+ common_status = CONTROL_PROTOCOL__pack_set_power_measurement_request(&request, &request_size, device.get_control_sequence(),
+ buffer_index, dvm, measurement_type);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ response = (CONTROL_PROTOCOL__set_power_measurement_response_t*)(payload->parameters);
+
+ LOGGER__INFO("The chosen dvm type is: {}, and measurement type: {}", response->dvm,
+ response->measurement_type);
+ if (CONTROL_PROTOCOL__DVM_OPTIONS_OVERCURRENT_PROTECTION == response->dvm) {
+ LOGGER__WARN(OVERCURRENT_PROTECTION_WARNING);
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::get_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, bool should_clear,
+ hailo_power_measurement_data_t *measurement_data)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__get_power_measurement_response_t *get_power_response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Validate arguments */
+ CHECK(CONTROL_PROTOCOL__MAX_NUMBER_OF_POWER_MEASUREMETS > buffer_index,
+ HAILO_INVALID_ARGUMENT, "Invalid power measurement index {}", buffer_index);
+ CHECK_ARG_NOT_NULL(measurement_data);
+ common_status = CONTROL_PROTOCOL__pack_get_power_measurement_request(&request, &request_size, device.get_control_sequence(),
+ buffer_index, should_clear);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+ get_power_response = (CONTROL_PROTOCOL__get_power_measurement_response_t *)(payload->parameters);
+
+ /* Copy measurement data from response to the exported measurement data */
+ measurement_data->average_time_value_milliseconds = get_power_response->average_time_value_milliseconds;
+ measurement_data->average_value = get_power_response->average_value;
+ measurement_data->min_value = get_power_response->min_value;
+ measurement_data->max_value = get_power_response->max_value;
+ measurement_data->total_number_of_samples = BYTE_ORDER__ntohl(get_power_response->total_number_of_samples);
+ LOGGER__DEBUG("avg: {:f}, min: {:f}, max: {:f}",
+ measurement_data->average_value,
+ measurement_data->min_value,
+ measurement_data->max_value);
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::start_power_measurement(Device &device,
+ CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ uint32_t delay_milliseconds = 0;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ delay_milliseconds = POWER_MEASUREMENT_DELAY_MS(sampling_period, averaging_factor);
+ // There is no logical way that measurement delay can be 0 - because sampling_period and averaging_factor cant be 0
+ // Hence if it is 0 - it means it was 0.xx and we want to round up to 1 in that case
+ if (0 == delay_milliseconds) {
+ delay_milliseconds = 1;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_start_power_measurement_request(&request, &request_size, device.get_control_sequence(),
+ delay_milliseconds, averaging_factor, sampling_period);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::stop_power_measurement(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_stop_power_measurement_request(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::i2c_write(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
+ const uint8_t *data, uint32_t length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(slave_config);
+ CHECK_ARG_NOT_NULL(data);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Pack request */
+ common_status = CONTROL_PROTOCOL__pack_i2c_write_request(&request, &request_size, device.get_control_sequence(),
+ register_address, static_cast<uint8_t>(slave_config->endianness),
+ slave_config->slave_address, slave_config->register_address_size, slave_config->bus_index, data, length);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::i2c_read(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
+ uint8_t *data, uint32_t length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__i2c_read_response_t *response = NULL;
+ uint32_t local_data_length = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(slave_config);
+ CHECK_ARG_NOT_NULL(data);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Pack request */
+ common_status = CONTROL_PROTOCOL__pack_i2c_read_request(&request, &request_size, device.get_control_sequence(),
+ register_address, static_cast<uint8_t>(slave_config->endianness),
+ slave_config->slave_address, slave_config->register_address_size, slave_config->bus_index, length,
+ slave_config->should_hold_bus);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ response = (CONTROL_PROTOCOL__i2c_read_response_t *)(payload->parameters);
+ local_data_length = BYTE_ORDER__ntohl(response->data_length);
+
+ /* Validate read data is data size */
+ if (local_data_length != length) {
+ status = HAILO_INVALID_CONTROL_RESPONSE;
+ LOGGER__ERROR("Read data size from I2C does not match register size. ({} != {})",
+ local_data_length, length);
+ goto exit;
+ }
+
+ /* Copy the returned results back to the user */
+ (void)memcpy(data, response->data, local_data_length);
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::config_core_top(Device &device, CONTROL_PROTOCOL__config_core_top_type_t config_type,
+ CONTROL_PROTOCOL__config_core_top_params_t *params)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(params);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_config_core_top_request(&request, &request_size, device.get_control_sequence(), config_type, params);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::phy_operation(Device &device, CONTROL_PROTOCOL__phy_operation_t operation_type)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_phy_operation_request(&request, &request_size, device.get_control_sequence(), operation_type);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::examine_user_config(Device &device, hailo_fw_user_config_information_t *info)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__examine_user_config_response_t *response = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(info);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_examine_user_config(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Save response information into exported struct */
+ response = ((CONTROL_PROTOCOL__examine_user_config_response_t *)(payload->parameters));
+ info->version = BYTE_ORDER__ntohl(response->version);
+ info->entry_count = BYTE_ORDER__ntohl(response->entry_count);
+ info->total_size = BYTE_ORDER__ntohl(response->total_size);
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::read_user_config_chunk(Device &device, uint32_t read_offset, uint32_t read_length,
+ uint8_t *buffer, uint32_t *actual_read_data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__read_user_config_response_t *response = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ common_status = CONTROL_PROTOCOL__pack_read_user_config(&request, &request_size, device.get_control_sequence(),
+ read_offset, read_length);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ response = (CONTROL_PROTOCOL__read_user_config_response_t *)(payload->parameters);
+ *actual_read_data_length = BYTE_ORDER__ntohl(response->data_length);
+ (void) memcpy(buffer, response->data, *actual_read_data_length);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::read_user_config(Device &device, uint8_t *buffer, uint32_t buffer_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ uint32_t actual_read_data_length = 0;
+ uint32_t read_offset = 0;
+ hailo_fw_user_config_information_t user_config_info = {};
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(buffer);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ status = examine_user_config(device, &user_config_info);
+ CHECK_SUCCESS(status);
+
+ CHECK(buffer_length >= user_config_info.total_size, HAILO_INSUFFICIENT_BUFFER,
+ "read buffer is too small. provided buffer size: {} bytes, user config size: {} bytes", buffer_length,
+ user_config_info.total_size);
+
+ LOGGER__INFO("Preparing to read user configuration. Version: {}, Entry Count: {}, Total Size (bytes): {}",
+ user_config_info.version, user_config_info.entry_count, user_config_info.total_size);
+
+ while (read_offset < user_config_info.total_size) {
+ read_user_config_chunk(device, read_offset, user_config_info.total_size - read_offset,
+ buffer + read_offset, &actual_read_data_length);
+ read_offset += actual_read_data_length;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::write_user_config_chunk(Device &device, uint32_t offset, const uint8_t *data, uint32_t chunk_size)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ common_status = CONTROL_PROTOCOL__pack_write_user_config_request(&request, &request_size,
+ device.get_control_sequence(), offset, data + offset, chunk_size);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::write_user_config(Device &device, const uint8_t *data, uint32_t data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ uint32_t offset = 0;
+ uint32_t chunk_size = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(data);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ while (offset < data_length) {
+ chunk_size = MIN(WRITE_CHUNK_SIZE, (data_length - offset));
+ status = write_user_config_chunk(device, offset, data, chunk_size);
+ CHECK_SUCCESS(status);
+ offset += chunk_size;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::erase_user_config(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_erase_user_config_request(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+
+hailo_status Control::read_board_config(Device &device, uint8_t *buffer, uint32_t buffer_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ uint32_t actual_read_data_length = 0;
+ uint32_t read_offset = 0;
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ CONTROL_PROTOCOL__read_user_config_response_t *response = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(buffer);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ CHECK(buffer_length >= BOARD_CONFIG_SIZE, HAILO_INSUFFICIENT_BUFFER,
+ "read buffer is too small. provided buffer size: {} bytes, board config size: {} bytes", buffer_length,
+ BOARD_CONFIG_SIZE);
+
+ LOGGER__INFO("Preparing to read board configuration");
+ common_status = CONTROL_PROTOCOL__pack_read_board_config(&request, &request_size, device.get_control_sequence(),
+ read_offset, BOARD_CONFIG_SIZE);
+
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+ response = (CONTROL_PROTOCOL__read_board_config_response_t *)(payload->parameters);
+ actual_read_data_length = BYTE_ORDER__ntohl(response->data_length);
+ (void) memcpy(buffer, response->data, actual_read_data_length);
+
+ return HAILO_SUCCESS;
+}
+
+
+
+hailo_status Control::write_board_config(Device &device, const uint8_t *data, uint32_t data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ uint32_t write_offset = 0;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(data);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ CHECK_SUCCESS(status);
+
+ CHECK(BOARD_CONFIG_SIZE >= data_length, HAILO_INVALID_OPERATION,
+ "Invalid size of board config. data_length={}, max_size={}" , data_length, BOARD_CONFIG_SIZE);
+
+ common_status = CONTROL_PROTOCOL__pack_write_board_config_request(&request, &request_size,
+ device.get_control_sequence(), write_offset, data + write_offset, data_length);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer,
+ &response_size);
+ CHECK_SUCCESS(status);
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Control::write_second_stage_to_internal_memory(Device &device, uint32_t offset, uint8_t *data, uint32_t data_length)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(data);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(&request, &request_size, device.get_control_sequence(), offset,
+ data, data_length);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+
+hailo_status Control::copy_second_stage_to_flash(Device &device, MD5_SUM_t *expected_md5, uint32_t second_stage_size)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arguments */
+ CHECK_ARG_NOT_NULL(expected_md5);
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__copy_second_stage_to_flash_request(&request, &request_size, device.get_control_sequence(), expected_md5, second_stage_size);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::start_firmware_update(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_start_firmware_update_request(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+hailo_status Control::finish_firmware_update(Device &device)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Validate arch */
+ status = Control::validate_arch_supported(device);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_finish_firmware_update_request(&request, &request_size, device.get_control_sequence());
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Parse response */
+ status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+ &request);
+ if (HAILO_SUCCESS != status) {
+ goto exit;
+ }
+
+ status = HAILO_SUCCESS;
+exit:
+ return status;
+}
+
+/* Writes one chunk of new firmware data at the given offset of the update image.
+ * @param data - chunk bytes to send; must not be NULL.
+ * @param data_length - number of bytes in the chunk. */
+hailo_status Control::write_firmware_update(Device &device, uint32_t offset, const uint8_t *data, uint32_t data_length)
+{
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(data);
+
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request with the chunk payload. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__write_firmware_update_request(&request,
+        &request_size, device.get_control_sequence(), offset, data, data_length);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Asks the firmware to validate a completed firmware update against the expected
+ * MD5 digest and total image size.
+ * @param expected_md5 - digest the device should verify against; must not be NULL. */
+hailo_status Control::validate_firmware_update(Device &device, MD5_SUM_t *expected_md5, uint32_t firmware_size)
+{
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(expected_md5);
+
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_validate_firmware_update_request(&request,
+        &request_size, device.get_control_sequence(), expected_md5, firmware_size);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Reads the current inbound-to-outbound latency measurement from the firmware.
+ * @param device - device to query.
+ * @param inbound_to_outbound_latency_nsec - output, latency in nanoseconds (host byte order);
+ *                                           must not be NULL.
+ * @return HAILO_SUCCESS on success, otherwise a failure status. */
+hailo_status Control::latency_measurement_read(Device &device, uint32_t *inbound_to_outbound_latency_nsec)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__latency_read_response_t *response = NULL;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(inbound_to_outbound_latency_nsec);
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Serialize the read request; any pack failure is reported as an internal failure. */
+    common_status = CONTROL_PROTOCOL__pack_latency_measurement_read_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* The payload carries the latency in network byte order; convert before handing it back. */
+    response = (CONTROL_PROTOCOL__latency_read_response_t*)(payload->parameters);
+    *inbound_to_outbound_latency_nsec = BYTE_ORDER__ntohl(response->inbound_to_outbound_latency_nsec);
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Configures (enables/disables) firmware-side latency measurement between the given
+ * inbound and outbound streams, using the provided start/stop buffer numbers. */
+hailo_status Control::latency_measurement_config(Device &device, uint8_t latency_measurement_en,
+    uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index,
+    uint32_t outbound_stream_index)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_latency_measurement_config_request(&request,
+        &request_size, device.get_control_sequence(), latency_measurement_en, inbound_start_buffer_number,
+        outbound_stop_buffer_number, inbound_stream_index, outbound_stream_index);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+
+/* Stores a sensor configuration chunk on the device.
+ * @param data - configuration payload bytes; must not be NULL.
+ * @param config_name - name buffer of config_name_length bytes; must not be NULL. */
+hailo_status Control::sensor_store_config(Device &device, uint32_t is_first, uint32_t section_index,
+    uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size, uint8_t *data,
+    uint32_t data_length, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+    uint32_t config_name_length, uint8_t *config_name)
+{
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(data);
+    CHECK_ARG_NOT_NULL(config_name);
+
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request with all sensor-config fields. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_sensor_store_config_request(&request,
+        &request_size, device.get_control_sequence(), is_first, section_index, start_offset,
+        reset_data_size, sensor_type, total_data_size, data, data_length, config_height,
+        config_width, config_fps, config_name_length, config_name);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Sets the I2C bus index the device should use for the given sensor type.
+ * @param sensor_type - sensor type whose bus assignment is updated.
+ * @param bus_index - I2C bus index to assign.
+ * @return HAILO_SUCCESS on success, otherwise a failure status. */
+hailo_status Control::sensor_set_i2c_bus_index(Device &device, uint32_t sensor_type, uint32_t bus_index)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS(status);
+
+    /* NOTE(review): unlike sibling controls, the pack result is assigned directly to
+     * hailo_status instead of being converted from HAILO_COMMON_STATUS_t - confirm the
+     * pack routine indeed returns hailo_status. */
+    status = CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(&request, &request_size, device.get_control_sequence(), sensor_type, bus_index);
+    CHECK_SUCCESS(status);
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/* Instructs the device to load the sensor configuration stored in the given section
+ * and start it. */
+hailo_status Control::sensor_load_and_start_config(Device &device, uint32_t section_index)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(&request,
+        &request_size, device.get_control_sequence(), section_index);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Resets the sensor associated with the given configuration section. */
+hailo_status Control::sensor_reset(Device &device, uint32_t section_index)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_sensor_reset_request(&request, &request_size,
+        device.get_control_sequence(), section_index);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Configures a generic I2C slave on the device (address, register width, bus,
+ * bus-hold behavior and endianness). */
+hailo_status Control::sensor_set_generic_i2c_slave(Device &device, uint16_t slave_address,
+    uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(&request,
+        &request_size, device.get_control_sequence(), slave_address, register_address_size, bus_index,
+        should_hold_bus, endianness);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+
+/* Reads data_length bytes of a stored sensor configuration from the device.
+ * @param section_index - configuration section to read from.
+ * @param offset - byte offset inside the section.
+ * @param data_length - number of bytes to read; the response must contain exactly this many.
+ * @param data - output buffer; must not be NULL and must hold at least data_length bytes
+ *               (the buffer size itself is not validated here - caller's responsibility).
+ * @return HAILO_SUCCESS on success, HAILO_INVALID_CONTROL_RESPONSE if the device returned
+ *         a different amount of data, otherwise a failure status. */
+hailo_status Control::sensor_get_config(Device &device, uint32_t section_index, uint32_t offset, uint32_t data_length,
+    uint8_t *data)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    uint32_t actual_read_data_length = 0;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__sensor_get_config_response_t *sensor_get_config_response = NULL;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(data);
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Serialize the request; any pack failure is reported as an internal failure. */
+    common_status = CONTROL_PROTOCOL__pack_sensor_get_config_request(&request, &request_size, device.get_control_sequence(), section_index, offset, data_length);
+
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* The response reports its own length (network byte order); it must match the request
+     * exactly, which also bounds the memcpy below to data_length bytes. */
+    sensor_get_config_response = (CONTROL_PROTOCOL__sensor_get_config_response_t *)(payload->parameters);
+    actual_read_data_length = BYTE_ORDER__ntohl(sensor_get_config_response->data_length);
+    if (data_length != actual_read_data_length) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Did not read all data from control response");
+        goto exit;
+    }
+    (void)memcpy(data, &sensor_get_config_response->data[0], actual_read_data_length);
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Reads the sensor sections-info table from the device into the caller's buffer.
+ * @param data - output buffer; must not be NULL. NOTE(review): the copied length comes
+ *               entirely from the device response and is NOT bounded against the caller's
+ *               buffer size here - callers must provide a buffer large enough for the
+ *               maximum sections-info payload. TODO confirm expected maximum.
+ * @return HAILO_SUCCESS on success, HAILO_INVALID_CONTROL_RESPONSE if the response
+ *         reports zero data, otherwise a failure status. */
+hailo_status Control::sensor_get_sections_info(Device &device, uint8_t *data)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    uint32_t actual_read_data_length = 0;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__sensor_get_sections_info_response_t *get_sections_info_response = NULL;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(data);
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Serialize the request; any pack failure is reported as an internal failure. */
+    common_status = CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(&request, &request_size, device.get_control_sequence());
+
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    get_sections_info_response = (CONTROL_PROTOCOL__sensor_get_sections_info_response_t *)(payload->parameters);
+
+    /* A zero-length payload is treated as an invalid response.
+     * NOTE(review): the log text ("Did not read all data") does not quite match this
+     * zero-length condition - looks copied from sensor_get_config; confirm intent. */
+    actual_read_data_length = BYTE_ORDER__ntohl(get_sections_info_response->data_length);
+    if (0 == actual_read_data_length) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Did not read all data from control response");
+        goto exit;
+    }
+    (void)memcpy(data, &get_sections_info_response->data[0], actual_read_data_length);
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Sends a network-group (application) header to the firmware's context-switch module. */
+hailo_status Control::context_switch_set_network_group_header(Device &device,
+    const CONTROL_PROTOCOL__application_header_t &network_group_header)
+{
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(
+        &request, &request_size, device.get_control_sequence(), &network_group_header);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    hailo_status status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Sends a single context-info chunk to the firmware's context-switch module.
+ * When the firmware reports that a trigger function reached forbidden memory space
+ * (device internal memory exhausted), the error is mapped to HAILO_OUT_OF_FW_MEMORY
+ * so the caller gets an indicative status. */
+hailo_status Control::context_switch_set_context_info_chunk(Device &device,
+    const CONTROL_PROTOCOL__context_switch_context_info_single_control_t &context_info)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    common_status = CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(&request, &request_size, device.get_control_sequence(),
+        &context_info);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        /* In case of max memory error, log and return an indicative error to the user.
+         * header may still be NULL if the response could not be parsed at all, so guard
+         * before dereferencing it (previously this was an unconditional dereference). */
+        if (NULL != header) {
+            CHECK((CONTEXT_SWITCH_TASK_STATUS_ADD_TRIGGER_FUNCTION_REACHED_FORBIDDEN_MEMORY_SPACE != header->status.major_status),
+                HAILO_OUT_OF_FW_MEMORY,
+                "Configured network groups reached maximum device internal memory. Please consider using fewer network groups.");
+        }
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Sends all context-info chunks to the firmware, in order, stopping at the first failure. */
+hailo_status Control::context_switch_set_context_info(Device &device,
+    const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &context_infos)
+{
+    for (size_t chunk_index = 0; chunk_index < context_infos.size(); chunk_index++) {
+        const auto chunk_status = context_switch_set_context_info_chunk(device, context_infos[chunk_index]);
+        CHECK_SUCCESS(chunk_status);
+    }
+    return HAILO_SUCCESS;
+}
+
+/* Reads the accumulated idle-time measurement from the firmware.
+ * @param measurement - output, idle time in nanoseconds (host byte order); must not be NULL.
+ * @return HAILO_SUCCESS on success, otherwise a failure status. */
+hailo_status Control::idle_time_get_measurement(Device &device, uint64_t *measurement)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__idle_time_get_measurement_response_t *idle_time_get_measurement_response = NULL;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(measurement);
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Serialize the request; any pack failure is reported as an internal failure. */
+    common_status = CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request with status {:#X}", common_status);
+        goto exit;
+    }
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed idle_time_get_measurement control with status {}", status);
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed validating idle_time_get_measurement control response with status {}", status);
+        goto exit;
+    }
+
+    idle_time_get_measurement_response = (CONTROL_PROTOCOL__idle_time_get_measurement_response_t *)(payload->parameters);
+
+    /*copy the measurement*/
+    *measurement = BYTE_ORDER__ntohll(idle_time_get_measurement_response->idle_time_ns);
+
+    /* Debug-print the two 32-bit halves of the 64-bit result via pointer casts.
+     * NOTE(review): which half is "low"/"high" here depends on host endianness -
+     * the labels assume a little-endian host; confirm if this matters. */
+    LOGGER__DEBUG("Received idle measurement low: {:#X} ns",
+        *((uint32_t *) measurement));
+    LOGGER__DEBUG("Received idle measurement high: {:#X} ns",
+        *(((uint32_t *) measurement) + 1));
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Enables or disables firmware-side idle-time measurement. */
+hailo_status Control::idle_time_set_measurement(Device &device, uint8_t measurement_enable)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(&request,
+        &request_size, device.get_control_sequence(), measurement_enable);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request with status {:#X}", pack_result);
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed idle_time_set_measurement control with status {}", status);
+        return status;
+    }
+    return HAILO_SUCCESS;
+}
+
+/* Enables or disables RX pause frames on the device. */
+hailo_status Control::set_pause_frames(Device &device, uint8_t rx_pause_frames_enable)
+{
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS(status);
+
+    /* Serialize the request; any pack failure is reported as an internal failure. */
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_set_pause_frames_request(&request, &request_size,
+        device.get_control_sequence(), rx_pause_frames_enable);
+    status = (HAILO_COMMON_STATUS__SUCCESS == pack_result) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    /* Execute the control against the firmware. */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Validate the response header; no payload fields are consumed. */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/* Downloads one chunk of a context's action list from the firmware.
+ * @param action_list_offset - offset (in bytes) into the full action list to read from.
+ * @param action_list_max_size - capacity of the caller's action_list buffer, in bytes.
+ * @param base_address - output, device base address of the action list (host byte order).
+ * @param action_list - output buffer receiving the chunk bytes.
+ * @param action_list_length - output, number of bytes copied into action_list.
+ * @param is_action_list_end - output, true when this chunk is the last one.
+ * @param batch_counter - output, current batch counter reported by the firmware.
+ * @return HAILO_SUCCESS on success, HAILO_INVALID_CONTROL_RESPONSE on malformed or
+ *         oversized responses, otherwise a failure status. */
+hailo_status Control::download_context_action_list_chunk(Device &device, uint32_t network_group_id,
+    CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index,
+    uint16_t action_list_offset, size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list,
+    uint16_t *action_list_length, bool *is_action_list_end, uint32_t *batch_counter)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__download_context_action_list_response_t *context_action_list_response = NULL;
+
+    /* Validate arguments - all output pointers are dereferenced below, so check them all
+     * (is_action_list_end and batch_counter were previously dereferenced unchecked). */
+    CHECK_ARG_NOT_NULL(base_address);
+    CHECK_ARG_NOT_NULL(action_list);
+    CHECK_ARG_NOT_NULL(action_list_length);
+    CHECK_ARG_NOT_NULL(is_action_list_end);
+    CHECK_ARG_NOT_NULL(batch_counter);
+
+    common_status = CONTROL_PROTOCOL__pack_download_context_action_list_request(&request, &request_size, device.get_control_sequence(),
+        network_group_id, context_type, context_index, action_list_offset);
+
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    context_action_list_response = (CONTROL_PROTOCOL__download_context_action_list_response_t *)(payload->parameters);
+
+    if (0 == BYTE_ORDER__ntohl(context_action_list_response->action_list_length)) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Received empty action list");
+        goto exit;
+    }
+    if (0 == BYTE_ORDER__ntohl(context_action_list_response->base_address)) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Received NULL pointer to base address");
+        goto exit;
+    }
+
+    /* Must bail out BEFORE the memcpy: previously the error status was set but execution
+     * fell through and copied an oversized action list into the caller's buffer. */
+    if (action_list_max_size < BYTE_ORDER__ntohl(context_action_list_response->action_list_length)) {
+        status = HAILO_INVALID_CONTROL_RESPONSE;
+        LOGGER__ERROR("Received action list bigger than allocated user buffer");
+        goto exit;
+    }
+
+    (void)memcpy(action_list, context_action_list_response->action_list,
+        BYTE_ORDER__ntohl(context_action_list_response->action_list_length));
+
+    *action_list_length = (uint16_t)(BYTE_ORDER__ntohl(context_action_list_response->action_list_length));
+    *base_address = BYTE_ORDER__ntohl(context_action_list_response->base_address);
+    *is_action_list_end = context_action_list_response->is_action_list_end;
+    *batch_counter = BYTE_ORDER__ntohl(context_action_list_response->batch_counter);
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/* Downloads a context's full action list by repeatedly fetching chunks until the
+ * firmware signals the last one.
+ * @param action_list_max_size - capacity of the caller's action_list buffer, in bytes.
+ * @param base_address - output, device base address reported with the final chunk.
+ * @param action_list - output buffer receiving the concatenated chunks.
+ * @param action_list_length - output, total bytes copied (uint16_t - see note below).
+ * @param batch_counter - output, batch counter reported with the final chunk.
+ * @return HAILO_SUCCESS on success, otherwise the first chunk failure. */
+hailo_status Control::download_context_action_list(Device &device, uint32_t network_group_id,
+    CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, size_t action_list_max_size,
+    uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length, uint32_t *batch_counter)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    bool is_action_list_end = false;
+    uint16_t chunk_action_list_length = 0;
+    uint16_t accumulated_action_list_length = 0;
+    uint8_t *action_list_current_offset = 0;
+    size_t remaining_action_list_max_size = 0;
+    uint32_t chunk_base_address = 0;
+    uint32_t batch_counter_local = 0;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(base_address);
+    CHECK_ARG_NOT_NULL(action_list);
+    CHECK_ARG_NOT_NULL(action_list_length);
+
+    action_list_current_offset = action_list;
+    remaining_action_list_max_size = action_list_max_size;
+
+    /* Each iteration fetches the next chunk at the accumulated offset and advances the
+     * write position; the chunk call itself bounds each copy by the remaining capacity.
+     * NOTE(review): the accumulated length and request offset are uint16_t, so a total
+     * action list above 64KB would wrap - confirm firmware bounds the total size. */
+    do {
+        status = download_context_action_list_chunk(device, network_group_id, context_type, context_index,
+            accumulated_action_list_length, remaining_action_list_max_size, &chunk_base_address,
+            action_list_current_offset, &chunk_action_list_length, &is_action_list_end, &batch_counter_local);
+        CHECK_SUCCESS(status);
+
+        accumulated_action_list_length = (uint16_t)(accumulated_action_list_length + chunk_action_list_length);
+        action_list_current_offset += chunk_action_list_length;
+        remaining_action_list_max_size -= chunk_action_list_length;
+    }
+    while (!is_action_list_end);
+
+    /* Set output variables (base address / batch counter come from the last chunk) */
+    *base_address = chunk_base_address;
+    *action_list_length = accumulated_action_list_length;
+    *batch_counter = batch_counter_local;
+
+    return HAILO_SUCCESS;
+}
+
+/* Moves the firmware's context-switch state machine to the requested state for the
+ * given network group, with the given dynamic batch size. */
+hailo_status Control::change_context_switch_status(Device &device,
+    CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
+    uint8_t network_group_index, uint16_t dynamic_batch_size, bool keep_nn_config_during_reset)
+{
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_change_context_switch_status_request(&request,
+        &request_size, device.get_control_sequence(), state_machine_status, network_group_index, dynamic_batch_size,
+        keep_nn_config_during_reset);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    hailo_status status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/* Enables the context-switch state machine for the given network group.
+ * The keep-nn-config-during-reset flag is always false on the enable path. */
+hailo_status Control::enable_core_op(Device &device, uint8_t network_group_index, uint16_t dynamic_batch_size)
+{
+    static const bool KEEP_NN_CONFIG_DURING_RESET = false;
+    return Control::change_context_switch_status(device, CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_ENABLED,
+        network_group_index, dynamic_batch_size, KEEP_NN_CONFIG_DURING_RESET);
+}
+
+/* Resets the context-switch state machine; the network-group index and batch size
+ * arguments are ignored on the reset path (per their original IGNORE_* naming). */
+hailo_status Control::reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset)
+{
+    static const uint8_t IGNORED_NETWORK_GROUP_INDEX = 0;
+    static const uint16_t IGNORED_DYNAMIC_BATCH_SIZE = 0;
+    return Control::change_context_switch_status(device, CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_RESET,
+        IGNORED_NETWORK_GROUP_INDEX, IGNORED_DYNAMIC_BATCH_SIZE, keep_nn_config_during_reset);
+}
+
+/* Enables or disables the watchdog for the given CPU. */
+hailo_status Control::wd_enable(Device &device, uint8_t cpu_id, bool should_enable)
+{
+    /* Refuse to run on unsupported device architectures. */
+    hailo_status status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Serialize the request. */
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    const HAILO_COMMON_STATUS_t pack_result = CONTROL_PROTOCOL__pack_wd_enable(&request, &request_size,
+        device.get_control_sequence(), cpu_id, should_enable);
+    if (HAILO_COMMON_STATUS__SUCCESS != pack_result) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_wd_enable with status {:#X}", pack_result);
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    /* Execute the control against the firmware. */
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    /* Validate the response header; no payload fields are consumed. */
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed wd_enable control with status {}", status);
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+/**
+ * Configures the watchdog of the given CPU on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] cpu_id - Id of the CPU whose watchdog is configured.
+ * @param[in] wd_cycles - Watchdog timeout, in cycles.
+ * @param[in] wd_mode - Watchdog mode (see CONTROL_PROTOCOL__WATCHDOG_MODE_t).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::wd_config(Device &device, uint8_t cpu_id, uint32_t wd_cycles, CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    // In/out: buffer capacity on input; updated to the actual response size by fw_interact.
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Pack the request into the wire format */
+    common_status = CONTROL_PROTOCOL__pack_wd_config(&request, &request_size, device.get_control_sequence(), cpu_id, wd_cycles, wd_mode);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_wd_config with status {:#X}", common_status);
+        goto exit;
+    }
+
+    /* Send the request to the firmware and receive the response */
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed wd_config control with status {}", status);
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Queries the device for the previous system state of the given CPU.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] cpu_id - Id of the CPU to query.
+ * @param[out] system - Receives the previous system state (must not be NULL).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::previous_system_state(Device &device, uint8_t cpu_id, CONTROL_PROTOCOL__system_state_t *system)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__previous_system_state_response_t *previous_system_state_response = NULL;
+
+    CHECK_ARG_NOT_NULL(system);
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    common_status = CONTROL_PROTOCOL__pack_previous_system_state(&request, &request_size, device.get_control_sequence(), cpu_id);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_previous_system_state with status {:#X}", common_status);
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed previous_system_state control with status {}", status);
+        goto exit;
+    }
+
+    previous_system_state_response = (CONTROL_PROTOCOL__previous_system_state_response_t *)(payload->parameters);
+
+    /*copy the measurement*/
+    // Response fields are in network byte order; convert before returning.
+    *system = BYTE_ORDER__ntohl(previous_system_state_response->system_state);
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Configures a dataflow interrupt on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] interrupt_type - Type of the interrupt.
+ * @param[in] interrupt_index - Index of the interrupt.
+ * @param[in] interrupt_sub_index - Sub-index of the interrupt.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::set_dataflow_interrupt(Device &device, uint8_t interrupt_type, uint8_t interrupt_index,
+    uint8_t interrupt_sub_index)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    // NOTE(review): unlike most sibling controls, there is no
+    // validate_arch_supported() call here — confirm this is intentional.
+    common_status = CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(&request, &request_size, device.get_control_sequence(),
+        interrupt_type, interrupt_index, interrupt_sub_index);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Sets the host connectivity information for the device-to-host (D2H)
+ * notification manager on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] host_port - Host UDP port to send notifications to (relevant for UDP transport).
+ * @param[in] host_ip_address - Host IP address (relevant for UDP transport).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::d2h_notification_manager_set_host_info(Device &device, uint16_t host_port, uint32_t host_ip_address)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    // PCIe/integrated devices receive notifications over vDMA; all other
+    // device types fall back to UDP.
+    auto connection_type = ((Device::Type::PCIE == device.get_type() || Device::Type::INTEGRATED == device.get_type()) ?
+        D2H_EVENT_COMMUNICATION_TYPE_VDMA : D2H_EVENT_COMMUNICATION_TYPE_UDP);
+
+    common_status = CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request(&request, &request_size, device.get_control_sequence(),
+        static_cast<uint8_t>(connection_type), host_port, host_ip_address);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Requests the device's D2H notification manager to send a host-info
+ * notification with the given priority.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] notification_priority - Priority of the notification to send.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::d2h_notification_manager_send_host_info_notification(Device &device, uint8_t notification_priority)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    common_status = CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request(&request, &request_size, device.get_control_sequence(), notification_priority);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+
+/**
+ * Clears all configured apps (network groups) from the device's context
+ * switch state.
+ *
+ * @param[in] device - The Hailo device.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::clear_configured_apps(Device &device)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    common_status = CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(&request, &request_size,
+        device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request with status {:#X}",
+            common_status);
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("failed clear_configured_apps control with status {}", status);
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Reads the chip temperature sensors (TS0/TS1) from the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] temp_info - Receives the temperature info (sample count and both sensor readings).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::get_chip_temperature(Device &device, hailo_chip_temperature_info_t *temp_info)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__get_chip_temperature_response_t* temps = NULL;
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    common_status = CONTROL_PROTOCOL__pack_get_chip_temperature_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    // NOTE(review): sample_count is byte-swapped with ntohs, but the
+    // temperature readings are copied as-is — presumably they need no
+    // conversion; confirm against the protocol definition.
+    temps = (CONTROL_PROTOCOL__get_chip_temperature_response_t *)(payload->parameters);
+    temp_info->sample_count = BYTE_ORDER__ntohs(temps->info.sample_count);
+    temp_info->ts0_temperature = temps->info.ts0_temperature;
+    temp_info->ts1_temperature = temps->info.ts1_temperature;
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Enables debugging on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] is_rma - Whether debugging is enabled in RMA mode.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::enable_debugging(Device &device, bool is_rma)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    common_status = CONTROL_PROTOCOL__pack_enable_debugging_request(&request, &request_size, device.get_control_sequence(), is_rma);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Fetches the raw extended-device-information response struct from the device.
+ * Fields in the returned struct are still in wire (network) byte order;
+ * callers are responsible for any conversion.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The raw response struct on success, otherwise a failed Expected.
+ */
+Expected<CONTROL_PROTOCOL__get_extended_device_information_response_t> Control::get_extended_device_info_response(Device &device)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arguments */
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    common_status = CONTROL_PROTOCOL__pack_get_extended_device_information_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Copy the struct out of the stack-local response buffer before it goes
+    // out of scope (std::move on the dereferenced cast materializes a value).
+    return std::move(*(CONTROL_PROTOCOL__get_extended_device_information_response_t *)(payload->parameters));
+}
+
+/**
+ * Returns the partial clusters layout bitmap of the device.
+ * For any architecture other than Hailo-8L the bitmap is not applicable, and
+ * PARTIAL_CLUSTERS_LAYOUT_IGNORE is returned.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The layout bitmap (host byte order) on success, otherwise a failed Expected.
+ */
+Expected<uint32_t> Control::get_partial_clusters_layout_bitmap(Device &device)
+{
+    auto device_arch_exp = device.get_architecture();
+    CHECK_EXPECTED(device_arch_exp);
+    if (HAILO_ARCH_HAILO8L != device_arch_exp.value()) {
+        // Partial clusters layout is only relevant in HAILO_ARCH_HAILO8L arch
+        return Expected<uint32_t>(PARTIAL_CLUSTERS_LAYOUT_IGNORE);
+    }
+    auto extended_device_info_response = get_extended_device_info_response(device);
+    CHECK_EXPECTED(extended_device_info_response);
+    // The raw response field is in network byte order.
+    return BYTE_ORDER__ntohl(extended_device_info_response->partial_clusters_layout_bitmap);
+}
+
+/**
+ * Fetches and parses the extended device information into the public
+ * hailo_extended_device_information_t representation.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The parsed extended device information on success, otherwise a failed Expected.
+ */
+Expected<hailo_extended_device_information_t> Control::get_extended_device_information(Device &device)
+{
+    auto extended_device_info_response = get_extended_device_info_response(device);
+    CHECK_EXPECTED(extended_device_info_response);
+    return control__parse_get_extended_device_information_results(extended_device_info_response.value());
+}
+
+/**
+ * Fetches and parses the device's health information.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The parsed health information on success, otherwise a failed Expected.
+ */
+Expected<hailo_health_info_t> Control::get_health_information(Device &device)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__get_health_information_response_t *get_health_information_response = NULL;
+
+    /* Validate arguments */
+
+    /* Validate arch */
+    status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    common_status = CONTROL_PROTOCOL__pack_get_health_information_request(&request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // The parse helper converts the raw payload into the public struct.
+    get_health_information_response = (CONTROL_PROTOCOL__get_health_information_response_t *)(payload->parameters);
+
+    return control__parse_get_health_information_results(get_health_information_response);
+}
+
+/**
+ * Configures a context switch breakpoint on the device (debug facility).
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] breakpoint_id - Id of the breakpoint to configure.
+ * @param[in] breakpoint_control - Breakpoint control command.
+ * @param[in] breakpoint_data - Breakpoint parameters.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::config_context_switch_breakpoint(Device &device, uint8_t breakpoint_id,
+    CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
+    CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    common_status = CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
+        &request, &request_size, device.get_control_sequence(), breakpoint_id, breakpoint_control, breakpoint_data);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    // NOTE(review): this function mixes CHECK_SUCCESS early-returns (above)
+    // with goto-exit error handling (below); consider unifying the style.
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Queries the status of a context switch breakpoint on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] breakpoint_id - Id of the breakpoint to query.
+ * @param[out] breakpoint_status - Receives the breakpoint status (must not be NULL).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::get_context_switch_breakpoint_status(Device &device, uint8_t breakpoint_id,
+    CONTROL_PROTOCOL__context_switch_debug_sys_status_t *breakpoint_status)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__get_context_switch_breakpoint_status_response_t *get_context_switch_breakpoint_status_response = NULL;
+
+    RETURN_IF_ARG_NULL(breakpoint_status);
+
+    common_status = CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
+        &request, &request_size, device.get_control_sequence(), breakpoint_id);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    get_context_switch_breakpoint_status_response =
+        (CONTROL_PROTOCOL__get_context_switch_breakpoint_status_response_t *)(payload->parameters);
+
+    // Copy the status out of the stack-local response buffer.
+    memcpy(breakpoint_status,
+        &(get_context_switch_breakpoint_status_response->breakpoint_status),
+        sizeof((*breakpoint_status)));
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Reads the context switch main header from the device (debug facility).
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] main_header - Receives the main header (must not be NULL).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::get_context_switch_main_header(Device &device, CONTROL_PROTOCOL__context_switch_main_header_t *main_header)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__get_context_switch_main_header_response_t *get_context_switch_main_header_response = NULL;
+
+    RETURN_IF_ARG_NULL(main_header);
+
+    common_status = CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
+        &request, &request_size, device.get_control_sequence());
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    if (HAILO_SUCCESS != status) {
+        goto exit;
+    }
+
+    get_context_switch_main_header_response =
+        (CONTROL_PROTOCOL__get_context_switch_main_header_response_t *)(payload->parameters);
+
+    // Copy the header out of the stack-local response buffer.
+    memcpy(main_header,
+        &(get_context_switch_main_header_response->main_header),
+        sizeof((*main_header)));
+
+    status = HAILO_SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Configures context switch timestamp collection on the device for a given
+ * batch (debug facility).
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] batch_index - Batch index to configure timestamps for.
+ * @param[in] enable_user_configuration - Whether user timestamp configuration is enabled.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::config_context_switch_timestamp(Device &device, uint16_t batch_index, bool enable_user_configuration)
+{
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    auto common_status = CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
+        &request, &request_size, device.get_control_sequence(), batch_index, enable_user_configuration);
+    auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Runs BIST (built-in self test) over the chip's memory blocks: first each
+ * allowed top-level memory block individually, then every cluster step.
+ * All tests are executed even if some fail; the status of the last failing
+ * test is returned (HAILO_SUCCESS if none failed).
+ *
+ * @param[in] device - The Hailo device.
+ * @return HAILO_SUCCESS if all tests passed, otherwise the last failure status.
+ */
+hailo_status Control::test_chip_memories(Device &device)
+{
+    uint32_t top_bypass_bitmap = 0;
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    /*cluster bypass and index are irrelevant for top*/
+    uint32_t cluster_bypass_bitmap_0 = 0;
+    uint32_t cluster_bypass_bitmap_1 = 0;
+
+    for (size_t mem_block = 0; mem_block < CONTROL_PROTOCOL__TOP_NUM_MEM_BLOCKS; mem_block++) {
+        /*only run test on allowed blocks */
+        if (0 == (CONTROL_PROTOCOL__BIST_TOP_WHITELIST & (1 << mem_block))) {
+            continue;
+        }
+        // Bypass every block except the one under test (clear its bit).
+        top_bypass_bitmap = CONTROL_PROTOCOL__BIST_TOP_BYPASS_ALL_MASK ^ (1 << mem_block);
+        auto block_status = run_bist_test(device, true, top_bypass_bitmap, 0, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
+        if (HAILO_SUCCESS != block_status) {
+            LOGGER__ERROR("bist test failed on memory block {}", mem_block);
+            status = block_status;
+        }
+    }
+
+    for (uint8_t cluster_index = 0; cluster_index < CONTROL_PROTOCOL_NUM_BIST_CLUSTER_STEPS; cluster_index++) {
+        /*top bypass irrelevant for clusters*/
+        top_bypass_bitmap = 0;
+        /*run on all memory blocks, bypass = 0*/
+        cluster_bypass_bitmap_0 = 0;
+        cluster_bypass_bitmap_1 = 0;
+        auto cluster_status = run_bist_test(device, false, top_bypass_bitmap, cluster_index, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
+        if (HAILO_SUCCESS != cluster_status) {
+            LOGGER__ERROR("bist test failed on cluster block {}", cluster_index);
+            status = cluster_status;
+        }
+    }
+
+    /*No errors encountered*/
+    // status is only written on failure, so UNINITIALIZED here means all passed.
+    if (HAILO_UNINITIALIZED == status){
+        status = HAILO_SUCCESS;
+    }
+
+    return status;
+}
+
+/**
+ * Runs a single BIST (built-in self test) step on the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] is_top_test - true for a top-level memory test, false for a cluster test.
+ * @param[in] top_bypass_bitmap - Bitmap of top memory blocks to bypass (top tests only).
+ * @param[in] cluster_index - Cluster step index (cluster tests only).
+ * @param[in] cluster_bypass_bitmap_0 - First cluster bypass bitmap.
+ * @param[in] cluster_bypass_bitmap_1 - Second cluster bypass bitmap.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::run_bist_test(Device &device, bool is_top_test, uint32_t top_bypass_bitmap,
+    uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1)
+{
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arch */
+    auto status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS(status);
+
+    auto common_status = CONTROL_PROTOCOL__pack_run_bist_test_request(
+        &request, &request_size, device.get_control_sequence(),
+        is_top_test, top_bypass_bitmap, cluster_index, cluster_bypass_bitmap_0, cluster_bypass_bitmap_1);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Sets the device's sleep state.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] sleep_state - The sleep state to enter.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::set_sleep_state(Device &device, hailo_sleep_state_t sleep_state)
+{
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+    /* Validate arch */
+    auto status = Control::validate_arch_supported(device);
+    CHECK_SUCCESS(status);
+
+    auto common_status = CONTROL_PROTOCOL__pack_set_sleep_state_request(
+        &request, &request_size, device.get_control_sequence(), static_cast<uint8_t>(sleep_state));
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Changes the HW-only-infer state on the device (start/stop) and retrieves
+ * the infer results reported by the firmware.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] state - The HW infer state to switch to.
+ * @param[in] network_group_index - Index of the network group to infer.
+ * @param[in] dynamic_batch_size - Dynamic batch size to infer with.
+ * @param[in] channels_info - Channel configuration for the infer.
+ * @param[out] results - Receives the infer results (must not be NULL).
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::change_hw_infer_status(Device &device, CONTROL_PROTOCOL__hw_infer_state_t state,
+    uint8_t network_group_index, uint16_t dynamic_batch_size,
+    CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info, CONTROL_PROTOCOL__hw_only_infer_results_t *results)
+{
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = sizeof(response_buffer);
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    CONTROL_PROTOCOL__change_hw_infer_status_response_t *change_hw_infer_status_response = NULL;
+
+    RETURN_IF_ARG_NULL(results);
+
+    auto common_status = CONTROL_PROTOCOL__pack_change_hw_infer_status_request(
+        &request, &request_size, device.get_control_sequence(), static_cast<uint8_t>(state),
+        network_group_index, dynamic_batch_size, channels_info);
+    auto status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    status = device.fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    CHECK_SUCCESS(status);
+
+    /* Parse response */
+    status = parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload,
+        &request);
+    CHECK_SUCCESS(status);
+
+    change_hw_infer_status_response = (CONTROL_PROTOCOL__change_hw_infer_status_response_t *)(payload->parameters);
+
+    // Copy the results out of the stack-local response buffer.
+    memcpy(results, &(change_hw_infer_status_response->results), sizeof((*results)));
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Starts a HW-only infer on the device for the given network group.
+ * The START results reported by the firmware are discarded.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] network_group_index - Index of the network group to infer.
+ * @param[in] dynamic_batch_size - Dynamic batch size to infer with.
+ * @param[in] channels_info - Channel configuration for the infer.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::start_hw_only_infer(Device &device, uint8_t network_group_index, uint16_t dynamic_batch_size,
+    CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info)
+{
+    CONTROL_PROTOCOL__hw_only_infer_results_t results = {};
+    return Control::change_hw_infer_status(device, CONTROL_PROTOCOL__HW_INFER_STATE_START,
+        network_group_index, dynamic_batch_size, channels_info ,&results);
+}
+
+/**
+ * Stops the running HW-only infer and retrieves its results.
+ * Default (placeholder) group/batch/channel values are sent with the STOP
+ * request.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] results - Receives the infer results.
+ * @return HAILO_SUCCESS on success, otherwise a hailo_status error.
+ */
+hailo_status Control::stop_hw_only_infer(Device &device, CONTROL_PROTOCOL__hw_only_infer_results_t *results)
+{
+    const uint8_t DEFAULT_NETWORK_GROUP = 0;
+    const uint16_t DEFAULT_DYNAMIC_BATCH_SIZE = 1;
+    CONTROL_PROTOCOL__hw_infer_channels_info_t channels_info_default = {};
+    return Control::change_hw_infer_status(device, CONTROL_PROTOCOL__HW_INFER_STATE_STOP,
+        DEFAULT_NETWORK_GROUP, DEFAULT_DYNAMIC_BATCH_SIZE, &channels_info_default, results);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file control.hpp
+ * @brief Contains defines and declarations related to controlling the Hailo-8 device
+ **/
+
+#ifndef __CONTROL_HPP__
+#define __CONTROL_HPP__
+
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+
+#include "device_common/control_protocol.hpp"
+
+#include "control_protocol.h"
+#include <stdbool.h>
+
+
+namespace hailort
+{
+
+#define CONTROL__MAX_SEQUENCE (0xFFFFFFFF)
+#define CONTROL__MAX_WRITE_MEMORY_CHUNK_SIZE (1024)
+
+#define FW_MAGIC (0x1DD89DE0)
+#define FW_SUPPORTED_HEADER_VERSION (0)
+#define BOARD_CONFIG_SIZE (500)
+
+/* TODO: Is this the correct size? */
+#define RESPONSE_MAX_BUFFER_SIZE (2048)
+#define WRITE_CHUNK_SIZE (1024)
+#define WORD_SIZE (4)
+
+
+class Control final
+{
+public:
+ Control() = delete;
+
+ static hailo_status parse_and_validate_response(uint8_t *message, uint32_t message_size,
+ CONTROL_PROTOCOL__response_header_t **header, CONTROL_PROTOCOL__payload_t **payload,
+ CONTROL_PROTOCOL__request_t *request);
+
+ /**
+ * Receive information about the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The information about the board.
+ */
+ static Expected<hailo_device_identity_t> identify(Device &device);
+
+
+ /**
+ * Receive extended information about the device.
+ *
+ * @param[in] device - The Hailo device.
+ * @return The extended information about the board.
+ */
+ static Expected<hailo_extended_device_information_t> get_extended_device_information(Device &device);
+
+ /**
+ * Receive information about the core cpu.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] core_info - The information about the core cpu.
+     * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns a @a hailo_status error.
+ */
+ static hailo_status core_identify(Device &device, hailo_core_information_t *core_info);
+
+ /**
+ * Configure a UDP input dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+     * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns a @a hailo_status error.
+ */
+ static hailo_status config_stream_udp_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Configure a UDP output dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+     * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns a @a hailo_status error.
+ */
+ static hailo_status config_stream_udp_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Configure a MIPI input dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+     * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns a @a hailo_status error.
+ */
+ static hailo_status config_stream_mipi_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Configure a MIPI output dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status config_stream_mipi_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Configure a PCIe input dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status config_stream_pcie_input(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Configure a PCIe output dataflow stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] params - The stream params that would be configured.
+ * @param[out] dataflow_manager_id - Unique id of the dataflow manager.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status config_stream_pcie_output(Device &device, CONTROL_PROTOCOL__config_stream_params_t *params, uint8_t &dataflow_manager_id);
+
+ /**
+ * Open a stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] dataflow_manager_id - Unique id of the dataflow manager.
+ * @param[in] is_input - Indicates whether the stream is an input or an output.
+ * @note The stream must be configured prior to its opening.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status open_stream(Device &device, uint8_t dataflow_manager_id, bool is_input);
+
+ /**
+ * Close a stream at a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] dataflow_manager_id - Unique id of the dataflow manager.
+ * @param[in] is_input - Indicates whether the stream is an input or an output.
+ * @note
+ * 1. A stream must be opened before closing.
+ * 2. A stream cannot be closed twice.
+ * 3. In order to close all the streams, call \ref close_all_streams.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status close_stream(Device &device, uint8_t dataflow_manager_id, bool is_input);
+ static hailo_status close_all_streams(Device &device);
+
+ /**
+ * Get idle time accumulated measurement.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] measurement - Pointer to store the measurement.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status idle_time_get_measurement(Device &device, uint64_t *measurement);
+
+ /**
+ * start/stop idle time measurement
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] measurement_enable - Start/stop the measurement.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status idle_time_set_measurement(Device &device, uint8_t measurement_enable);
+
+ /**
+ * Start firmware update of a Hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status start_firmware_update(Device &device);
+ static hailo_status write_firmware_update(Device &device, uint32_t offset, const uint8_t *data, uint32_t data_length);
+ static hailo_status validate_firmware_update(Device &device, MD5_SUM_t *expected_md5, uint32_t firmware_size);
+ static hailo_status finish_firmware_update(Device &device);
+ static hailo_status write_second_stage_to_internal_memory(Device &device, uint32_t offset, uint8_t *data, uint32_t data_length);
+ static hailo_status copy_second_stage_to_flash(Device &device, MD5_SUM_t *expected_md5, uint32_t second_stage_size);
+
+ static hailo_status examine_user_config(Device &device, hailo_fw_user_config_information_t *info);
+
+ static hailo_status read_user_config(Device &device, uint8_t *buffer, uint32_t buffer_length);
+
+ static hailo_status write_user_config(Device &device, const uint8_t *data, uint32_t data_length);
+
+ static hailo_status erase_user_config(Device &device);
+
+ static hailo_status read_board_config(Device &device, uint8_t *buffer, uint32_t buffer_length);
+
+ static hailo_status write_board_config(Device &device, const uint8_t *data, uint32_t data_length);
+
+ static hailo_status phy_operation(Device &device, CONTROL_PROTOCOL__phy_operation_t operation_type);
+
+ static hailo_status config_core_top(Device &device, CONTROL_PROTOCOL__config_core_top_type_t config_type,
+ CONTROL_PROTOCOL__config_core_top_params_t *params);
+
+ /**
+ * Write data to an I2C slave over a hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] slave_config - The configuration of the slave.
+ * @param[in] register_address - The address of the register to which the data will be written
+ * @param[in] data - A pointer to a buffer that contains the data to be written to the slave.
+ * @param[in] length - The size of @a data in bytes.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status i2c_write(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
+ const uint8_t *data, uint32_t length);
+
+ /**
+ * Read data from an I2C slave over a hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] slave_config - The configuration of the slave.
+ * @param[in] register_address - The address of the register from which the data will be read.
+ * @param[in] data - Pointer to a buffer that would store the read data.
+ * @param[in] length - The number of bytes to read into the buffer pointed by @a data.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status i2c_read(Device &device, const hailo_i2c_slave_config_t *slave_config, uint32_t register_address,
+ uint8_t *data, uint32_t length);
+
+ /**
+ * Measure the latency of a single image at the nn core of a hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] latency_measurement_en - Boolean if the latency should be enabled or not.
+ * @param[in] inbound_start_buffer_number - The inbound buffer from which the system starts the latency measurement.
+ * @param[in] outbound_stop_buffer_number - The outbound buffer at which the system stops the latency measurement.
+ * @param[in] inbound_stream_index - Which input stream to measure latency from.
+ * @param[in] outbound_stream_index - Which output stream to measure latency from.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status latency_measurement_config(Device &device, uint8_t latency_measurement_en,
+
+ uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index,
+ uint32_t outbound_stream_index);
+ /**
+ * Read the measurement of the latency of a single image at the nn core of a hailo device.
+ *
+ * @param[in] device - The Hailo device.
+ * @param[out] inbound_to_outbound_latency_nsec - The latency in nanoseconds.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status latency_measurement_read(Device &device, uint32_t *inbound_to_outbound_latency_nsec);
+ static hailo_status sensor_store_config(Device &device, uint32_t is_first, uint32_t section_index, uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size,
+ uint8_t *data, uint32_t data_length, uint16_t config_height, uint16_t config_width, uint16_t config_fps, uint32_t config_name_length, uint8_t *config_name);
+ static hailo_status sensor_get_config(Device &device, uint32_t section_index, uint32_t offset, uint32_t data_length, uint8_t *data);
+ static hailo_status sensor_set_i2c_bus_index(Device &device, uint32_t sensor_type, uint32_t bus_index);
+ static hailo_status sensor_load_and_start_config(Device &device, uint32_t section_index);
+ static hailo_status sensor_reset(Device &device, uint32_t section_index);
+ static hailo_status sensor_set_generic_i2c_slave(Device &device, uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness);
+ static hailo_status sensor_get_sections_info(Device &device, uint8_t *data);
+
+ /**
+ * Download generated context switch action list per single context
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] network_group_id - Unique identifier for the network group.
+ * @param[in] context_type - type of context
+ * @param[in] context_index - context index of the context the user wishes to download the action list. Should
+ * be 0 for non-dynamic contexts.
+ * @param[out] base_address - base address of the context action list in the FW memory
+ * @param[out] action_list - buffer of the action list
+ * @param[out] action_list_length - size of the action list buffer
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ // TODO: fix
+ static hailo_status download_context_action_list(Device &device, uint32_t network_group_id,
+ CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index,
+ size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length,
+ uint32_t *batch_counter);
+
+ /**
+ * Enable core-op
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] core_op_index - core_op index
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status enable_core_op(Device &device, uint8_t core_op_index, uint16_t dynamic_batch_size);
+ /**
+ * reset context switch state machine
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] keep_nn_config_during_reset -
+ * Set this when the user wishes to remain in the same network group across the reset;
+ * this reset flow keeps most of the network group configuration for faster batch switching.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset);
+ /**
+ * set dataflow interrupt by control
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] interrupt_type - cast from an enum into uint8_t - type of the interrupt
+ * @param[in] interrupt_index - interrupt index (PCIe channel or Cluster index)
+ * @param[in] interrupt_sub_index - interrupt sub-index (LCU index in cluster)
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status set_dataflow_interrupt(Device &device, uint8_t interrupt_type, uint8_t interrupt_index,
+ uint8_t interrupt_sub_index);
+
+ /**
+ * set d2h manager a new host configuration by control
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] host_port - host port in case connection_type is Ethernet, otherwise neglected.
+ * @param[in] host_ip_address - host ip in case connection_type is Ethernet, otherwise neglected,
+ * 0 means auto detect IP address from control.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status d2h_notification_manager_set_host_info(Device &device, uint16_t host_port, uint32_t host_ip_address);
+ static hailo_status d2h_notification_manager_send_host_info_notification(Device &device, uint8_t notification_priority);
+
+ /**
+ * Enable/disable halting transmission following an Rx pause frame
+ *
+ * @param[in] device - The Hailo device.
+ * @param[in] rx_pause_frames_enable - Bool indicating whether to enable or disable rx pause frames
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+ static hailo_status set_pause_frames(Device &device, uint8_t rx_pause_frames_enable);
+
+ static hailo_status set_fw_logger(Device &device, hailo_fw_logger_level_t level, uint32_t interface_mask);
+ static hailo_status write_memory(Device &device, uint32_t address, const uint8_t *data, uint32_t data_length);
+ static hailo_status read_memory(Device &device, uint32_t address, uint8_t *data, uint32_t data_length);
+ static hailo_status context_switch_set_context_info(Device &device,
+ const std::vector<CONTROL_PROTOCOL__context_switch_context_info_single_control_t> &context_infos);
+ static hailo_status context_switch_set_network_group_header(Device &device,
+ const CONTROL_PROTOCOL__application_header_t &network_group_header);
+ static hailo_status wd_enable(Device &device, uint8_t cpu_id, bool should_enable);
+ static hailo_status wd_config(Device &device, uint8_t cpu_id, uint32_t wd_cycles, CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode);
+ static hailo_status previous_system_state(Device &device, uint8_t cpu_id, CONTROL_PROTOCOL__system_state_t *system_state);
+ static hailo_status clear_configured_apps(Device &device);
+ static hailo_status get_chip_temperature(Device &device, hailo_chip_temperature_info_t *temp_info);
+ static hailo_status enable_debugging(Device &device, bool is_rma);
+
+ static hailo_status config_context_switch_breakpoint(Device &device, uint8_t breakpoint_id,
+ CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data);
+ static hailo_status get_context_switch_breakpoint_status(Device &device, uint8_t breakpoint_id,
+ CONTROL_PROTOCOL__context_switch_debug_sys_status_t *breakpoint_status);
+ static hailo_status get_context_switch_main_header(Device &device,
+ CONTROL_PROTOCOL__context_switch_main_header_t *main_header);
+ static hailo_status config_context_switch_timestamp(Device &device, uint16_t batch_index, bool enable_user_configuration);
+ static hailo_status test_chip_memories(Device &device);
+ static hailo_status run_bist_test(Device &device, bool is_top_test, uint32_t top_bypass_bitmap,
+ uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1);
+ static hailo_status set_clock_freq(Device &device, uint32_t clock_freq);
+ static Expected<hailo_health_info_t> get_health_information(Device &device);
+ static hailo_status set_throttling_state(Device &device, bool should_activate);
+ static Expected<bool> get_throttling_state(Device &device);
+ static hailo_status set_overcurrent_state(Device &device, bool should_activate);
+ static Expected<bool> get_overcurrent_state(Device &device);
+ static Expected<CONTROL_PROTOCOL__hw_consts_t> get_hw_consts(Device &device);
+ static hailo_status set_sleep_state(Device &device, hailo_sleep_state_t sleep_state);
+ static hailo_status change_hw_infer_status(Device &device, CONTROL_PROTOCOL__hw_infer_state_t state,
+ uint8_t network_group_index, uint16_t dynamic_batch_size,
+ CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info, CONTROL_PROTOCOL__hw_only_infer_results_t *results);
+ static hailo_status start_hw_only_infer(Device &device, uint8_t network_group_index, uint16_t dynamic_batch_size,
+ CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info);
+ static hailo_status stop_hw_only_infer(Device &device, CONTROL_PROTOCOL__hw_only_infer_results_t *results);
+ // TODO: needed?
+ static hailo_status power_measurement(Device &device, CONTROL_PROTOCOL__dvm_options_t dvm,
+ CONTROL_PROTOCOL__power_measurement_types_t measurement_type, float32_t *measurement);
+ static hailo_status set_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, CONTROL_PROTOCOL__dvm_options_t dvm,
+ CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
+ static hailo_status get_power_measurement(Device &device, hailo_measurement_buffer_index_t buffer_index, bool should_clear,
+ hailo_power_measurement_data_t *measurement_data);
+ static hailo_status start_power_measurement(Device &device,
+ CONTROL_PROTOCOL__averaging_factor_t averaging_factor, CONTROL_PROTOCOL__sampling_period_t sampling_period);
+ static hailo_status stop_power_measurement(Device &device);
+
+ static Expected<uint32_t> get_partial_clusters_layout_bitmap(Device &device);
+
+private:
+ static hailo_status write_memory_chunk(Device &device, uint32_t address, const uint8_t *data, uint32_t chunk_size);
+ static hailo_status read_memory_chunk(Device &device, uint32_t address, uint8_t *data, uint32_t chunk_size);
+ static hailo_status read_user_config_chunk(Device &device, uint32_t read_offset, uint32_t read_length,
+ uint8_t *buffer, uint32_t *actual_read_data_length);
+ static hailo_status write_user_config_chunk(Device &device, uint32_t offset, const uint8_t *data, uint32_t chunk_size);
+ static hailo_status download_context_action_list_chunk(Device &device, uint32_t network_group_id,
+ CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset,
+ size_t action_list_max_size, uint32_t *base_address, uint8_t *action_list, uint16_t *action_list_length,
+ bool *is_action_list_end, uint32_t *batch_counter);
+ static hailo_status context_switch_set_context_info_chunk(Device &device,
+ const CONTROL_PROTOCOL__context_switch_context_info_single_control_t &context_info);
+ static hailo_status change_context_switch_status(Device &device,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
+ uint8_t network_group_index, uint16_t dynamic_batch_size, bool keep_nn_config_during_reset);
+ static Expected<CONTROL_PROTOCOL__get_extended_device_information_response_t> get_extended_device_info_response(Device &device);
+ static hailo_status validate_arch_supported(Device &device, const std::vector<hailo_device_architecture_t> &supported_archs = { HAILO_ARCH_HAILO8, HAILO_ARCH_HAILO8L });
+};
+
+} /* namespace hailort */
+
+#endif /* __CONTROL_HPP__ */
--- /dev/null
+/*
+ * =============================================================================
+ *
+ * HAILO
+ *
+ * Property of HAILO Tech
+ * For Unrestricted Internal Use Only
+ * Unauthorized reproduction and/or distribution is strictly prohibited.
+ * This product is protected under copyright law and trade secret law
+ * Created 2018, (C) Copyright 2018 Hailo Tech . All rights reserved.
+ * as an unpublished work.
+ */
+/**
+* Filename: control_protocol.c
+*
+* Description: Implements control protocol packing/unpacking.
+*
+*=============================================================================*/
+
+#include "common/utils.hpp"
+
+#include "device_common/control_protocol.hpp"
+
+#include "control_protocol.h"
+#include "byte_order.h"
+#include "status.h"
+#include <stdint.h>
+#include <string.h>
+
+
+using namespace hailort;
+
+#ifndef FIRMWARE_ARCH /*this file should not be compiled for firmware*/
+
+bool g_CONTROL_PROTOCOL__is_critical[HAILO_CONTROL_OPCODE_COUNT] = { /* Per-opcode criticality flag, generated from the opcode X-macro table */
+#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) is_critical,
+ CONTROL_PROTOCOL__OPCODES_VARIABLES
+#undef CONTROL_PROTOCOL__OPCODE_X
+};
+
+CPU_ID_t g_CONTROL_PROTOCOL__cpu_id[HAILO_CONTROL_OPCODE_COUNT] = { /* Per-opcode target CPU id, generated from the opcode X-macro table */
+#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) cpu_id,
+ CONTROL_PROTOCOL__OPCODES_VARIABLES
+#undef CONTROL_PROTOCOL__OPCODE_X
+};
+
+const char *CONTROL_PROTOCOL__textual_format[] = /* Opcode-to-name lookup table, generated from the opcode X-macro table */
+{
+#define STRINGIFY(name) #name
+#define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) STRINGIFY(name),
+ CONTROL_PROTOCOL__OPCODES_VARIABLES
+#undef CONTROL_PROTOCOL__OPCODE_X
+};
+
+const char *CONTROL_PROTOCOL__get_textual_opcode(CONTROL_PROTOCOL__OPCODE_t opcode) /* Returns the textual name of the given opcode */
+{
+ return CONTROL_PROTOCOL__textual_format[opcode]; /* NOTE(review): no bounds check - caller must pass a valid opcode */
+}
+
+#define CHANGE_HW_INFER_REQUEST_PARAMETER_COUNT (4)
+
+/* Functions declarations */
+HAILO_COMMON_STATUS_t control_protocol__parse_message(uint8_t *message,
+ uint32_t message_size,
+ CONTROL_PROTOCOL__common_header_t **header,
+ uint16_t full_header_size,
+ CONTROL_PROTOCOL__payload_t **payload,
+ uint8_t expected_ack_value);
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__parse_response(uint8_t *message,
+ uint32_t message_size,
+ CONTROL_PROTOCOL__response_header_t **header,
+ CONTROL_PROTOCOL__payload_t **payload,
+ CONTROL_PROTOCOL__status_t *fw_status)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ if ((NULL == message) || (NULL == header) || (NULL == payload) || (NULL == fw_status)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Validate the raw message (version, ACK flag set, parameter bounds) and locate header/payload */
+ status = control_protocol__parse_message(message,
+ message_size,
+ (CONTROL_PROTOCOL__common_header_t**)header,
+ sizeof(**header),
+ payload,
+ CONTROL_PROTOCOL__ACK_SET);
+ if (HAILO_COMMON_STATUS__SUCCESS != status) {
+ goto exit;
+ }
+
+ /* Copy the firmware status out of the response header, converting from network byte order */
+ fw_status->major_status = BYTE_ORDER__ntohl((*header)->status.major_status);
+ fw_status->minor_status = BYTE_ORDER__ntohl((*header)->status.minor_status);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t control_protocol__parse_message(uint8_t *message,
+ uint32_t message_size,
+ CONTROL_PROTOCOL__common_header_t **header,
+ uint16_t full_header_size,
+ CONTROL_PROTOCOL__payload_t **payload,
+ uint8_t expected_ack_value)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t current_offset = 0;
+ CONTROL_PROTOCOL__parameter_t *current_parameter = NULL;
+ uint32_t parameter_count = 0;
+ CONTROL_PROTOCOL_flags_t control_flags = {};
+ CONTROL_PROTOCOL__common_header_t *local_common_header = NULL;
+ CONTROL_PROTOCOL__payload_t *local_payload = NULL;
+ uint32_t protocol_version = 0;
+
+ local_common_header = (CONTROL_PROTOCOL__common_header_t *)(message);
+ protocol_version = BYTE_ORDER__ntohl(local_common_header->version);
+
+ /* Only protocol version 2 is accepted */
+ switch (protocol_version) {
+ case CONTROL_PROTOCOL__PROTOCOL_VERSION_2:
+ break;
+ default:
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_VERSION;
+ goto exit;
+ break;
+ }
+
+ control_flags.integer = BYTE_ORDER__ntohl(local_common_header->flags.integer);
+ if (expected_ack_value != control_flags.bitstruct.ack) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__UNEXPECTED_ACK_VALUE;
+ goto exit;
+ }
+
+ current_offset = full_header_size;
+ /* Check if there are any parameters to parse */
+ if (current_offset < message_size) {
+ local_payload = (CONTROL_PROTOCOL__payload_t *)(message + current_offset);
+ current_offset += sizeof(*local_payload);
+
+ /* If there are any parameters, start parsing them */
+ if (0 < BYTE_ORDER__ntohl(local_payload->parameter_count)) {
+ /* Check that the frame doesn't overrun after parameter count */
+ if (current_offset > message_size) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__OVERRUN_BEFORE_PARAMETER;
+ goto exit;
+ }
+ /* Validate each parameter */
+ for (parameter_count = 0;
+ parameter_count < BYTE_ORDER__ntohl(local_payload->parameter_count);
+ ++parameter_count) {
+ current_parameter = (CONTROL_PROTOCOL__parameter_t *)(
+ (message) + current_offset);
+ /* Check that the parameter doesn't overrun the packet */
+ current_offset += sizeof(*current_parameter) + BYTE_ORDER__ntohl(current_parameter->length);
+ if (current_offset > message_size) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__OVERRUN_AT_PARAMETER;
+ goto exit;
+ }
+ }
+ }
+ }
+
+ /* Validate that the entire message was parsed */
+ if (current_offset != message_size) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__PART_OF_THE_MESSAGE_NOT_PARSED;
+ goto exit;
+ }
+
+ /* Packet is valid, assign out parameters */
+ *header = local_common_header;
+ local_common_header = NULL;
+ *payload = local_payload;
+ local_payload = NULL;
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__get_sequence_from_response_buffer(uint8_t *response_buffer,
+ size_t response_buffer_size, uint32_t *sequence)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ uint32_t local_sequence = 0;
+ CONTROL_PROTOCOL__common_header_t *common_header = NULL;
+
+ if ((NULL == response_buffer) || (NULL == sequence)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* The buffer must hold at least a full common header */
+ if (sizeof(CONTROL_PROTOCOL__common_header_t) > response_buffer_size) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_BUFFER_SIZE;
+ goto exit;
+ }
+
+ /* Extract the sequence number from the common header (stored in network byte order) */
+ common_header = ((CONTROL_PROTOCOL__common_header_t*)(response_buffer));
+ local_sequence = BYTE_ORDER__ntohl(common_header->sequence);
+
+ *sequence = local_sequence;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+void control_protocol__pack_request_header(CONTROL_PROTOCOL__request_t *request, uint32_t sequence, CONTROL_PROTOCOL__OPCODE_t opcode, uint32_t parameter_count) /* Fills the common request header fields, all in network byte order */
+{
+ request->header.common_header.opcode = BYTE_ORDER__htonl(opcode);
+ request->header.common_header.sequence = BYTE_ORDER__htonl(sequence);
+ request->header.common_header.version = BYTE_ORDER__htonl(CONTROL_PROTOCOL__PROTOCOL_VERSION);
+ /* The parameter count follows the fixed common-header fields */
+ request->parameter_count = BYTE_ORDER__htonl(parameter_count);
+}
+
+HAILO_COMMON_STATUS_t control_protocol__pack_empty_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__OPCODE_t opcode) /* Packs a request that carries zero parameters */
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header only - an empty request has no parameter payload */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
+ control_protocol__pack_request_header(request, sequence, opcode, 0);
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_IDENTIFY); /* IDENTIFY carries no parameters */
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_core_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_CORE_IDENTIFY); /* CORE_IDENTIFY carries no parameters */
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_fw_logger_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ hailo_fw_logger_level_t level, uint8_t interface_mask)
+{
+ size_t local_request_size = 0;
+
+ CHECK(request != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+ CHECK(request_size != nullptr, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+ CHECK(level <= (uint8_t) CONTROL_PROTOCOL__FW_MAX_LOGGER_LEVEL, HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT);
+ CHECK(interface_mask <= CONTROL_PROTOCOL__FW_MAX_LOGGER_INTERFACE, HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT);
+
+ /* Compile-time guarantees that the public hailo_* enum values match the FW protocol values */
+ static_assert((uint32_t) FW_LOGGER_LEVEL_TRACE == (uint32_t) HAILO_FW_LOGGER_LEVEL_TRACE,
+ "mismatch in FW_LOGGER_LEVEL_TRACE and HAILO_FW_LOGGER_LEVEL_TRACE");
+ static_assert((uint32_t) FW_LOGGER_LEVEL_DEBUG == (uint32_t) HAILO_FW_LOGGER_LEVEL_DEBUG,
+ "mismatch in FW_LOGGER_LEVEL_DEBUG and HAILO_FW_LOGGER_LEVEL_DEBUG");
+ static_assert((uint32_t) FW_LOGGER_LEVEL_INFO == (uint32_t) HAILO_FW_LOGGER_LEVEL_INFO,
+ "mismatch in FW_LOGGER_LEVEL_INFO and HAILO_FW_LOGGER_LEVEL_INFO");
+ static_assert((uint32_t) FW_LOGGER_LEVEL_WARN == (uint32_t) HAILO_FW_LOGGER_LEVEL_WARN,
+ "mismatch in FW_LOGGER_LEVEL_WARN and HAILO_FW_LOGGER_LEVEL_WARN");
+ static_assert((uint32_t) FW_LOGGER_LEVEL_ERROR == (uint32_t) HAILO_FW_LOGGER_LEVEL_ERROR,
+ "mismatch in FW_LOGGER_LEVEL_ERROR and HAILO_FW_LOGGER_LEVEL_ERROR");
+ static_assert((uint32_t) FW_LOGGER_LEVEL_FATAL == (uint32_t) HAILO_FW_LOGGER_LEVEL_FATAL,
+ "mismatch in FW_LOGGER_LEVEL_FATAL and HAILO_FW_LOGGER_LEVEL_FATAL");
+ static_assert((uint32_t)CONTROL_PROTOCOL__INTERFACE_PCIE == (uint32_t)HAILO_FW_LOGGER_INTERFACE_PCIE,
+ "mismatch in CONTROL_PROTOCOL__INTERFACE_PCIE and HAILO_FW_LOGGER_INTERFACE_PCIE");
+ static_assert((uint32_t)CONTROL_PROTOCOL__INTERFACE_UART == (uint32_t)HAILO_FW_LOGGER_INTERFACE_UART,
+ "mismatch in CONTROL_PROTOCOL__INTERFACE_UART and HAILO_FW_LOGGER_INTERFACE_UART");
+
+ /* Header: 2 parameters (level, logger interface bit mask) */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_fw_logger_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_FW_LOGGER, 2);
+
+ request->parameters.set_fw_logger_request.level_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_fw_logger_request.level));
+ request->parameters.set_fw_logger_request.level = static_cast<uint8_t>(level);
+
+ request->parameters.set_fw_logger_request.logger_interface_bit_mask_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_fw_logger_request.logger_interface_bit_mask));
+ request->parameters.set_fw_logger_request.logger_interface_bit_mask = interface_mask;
+
+ *request_size = local_request_size;
+ return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ bool should_activate)
+{
+ size_t local_request_size = 0;
+
+ CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+ CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+ /* Header: 1 parameter (should_activate) */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_throttling_state_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_THROTTLING_STATE, 1);
+
+ request->parameters.set_throttling_state_request.should_activate_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_throttling_state_request.should_activate));
+ request->parameters.set_throttling_state_request.should_activate = should_activate;
+
+ *request_size = local_request_size;
+ return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_THROTTLING_STATE); /* GET_THROTTLING_STATE carries no parameters */
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ bool should_activate)
+{
+ size_t local_request_size = 0;
+
+ CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+ CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+ /* Header: 1 parameter (should_activate) */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_overcurrent_state_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_OVERCURRENT_STATE, 1);
+
+ request->parameters.set_overcurrent_state_request.should_activate_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_overcurrent_state_request.should_activate));
+ request->parameters.set_overcurrent_state_request.should_activate = should_activate;
+
+ *request_size = local_request_size;
+ return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_OVERCURRENT_STATE); /* GET_OVERCURRENT_STATE carries no parameters */
+}
+
+/* Packs a GET_HW_CONSTS control request (no payload). */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_hw_consts_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_HW_CONSTS);
+}
+
+/**
+ * Packs a SET_CLOCK_FREQ control request (1 parameter).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (base header + payload struct).
+ * @param sequence - protocol sequence number for the request header.
+ * @param clock_freq - requested clock frequency; converted to network byte order.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_clock_freq_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+    uint32_t clock_freq)
+{
+    size_t local_request_size = 0;
+
+    /* Use CHECK_NOT_NULL for consistency with the other packers in this file
+     * (e.g. the throttling/overcurrent packers above). */
+    CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+    CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_clock_freq_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_CLOCK_FREQ, 1);
+
+    request->parameters.set_clock_freq_request.clock_freq_length = BYTE_ORDER__htonl(sizeof(request->parameters.set_clock_freq_request.clock_freq));
+    request->parameters.set_clock_freq_request.clock_freq = BYTE_ORDER__htonl(clock_freq);
+
+    *request_size = local_request_size;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/**
+ * Packs a WRITE_MEMORY control request (2 parameters: address, data).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (header + payload struct + data_length).
+ * @param sequence - protocol sequence number for the request header.
+ * @param address - target device memory address (converted to network byte order).
+ * @param data - bytes to write; copied verbatim after the fixed payload fields.
+ * @param data_length - number of bytes in 'data'.
+ * NOTE(review): data_length is not validated against the request buffer capacity
+ * here — presumably the caller guarantees it fits; confirm upper-layer validation.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_memory_request_t) + data_length;
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_MEMORY, 2);
+
+    /* Address */
+    request->parameters.write_memory_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_memory_request.address));
+    request->parameters.write_memory_request.address = BYTE_ORDER__htonl(address);
+
+    /* Data */
+    request->parameters.write_memory_request.data_length = BYTE_ORDER__htonl(data_length);
+    memcpy(&(request->parameters.write_memory_request.data), data, data_length);
+
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a READ_MEMORY control request (2 parameters: address, data count).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (base header + payload struct).
+ * @param sequence - protocol sequence number for the request header.
+ * @param address - device memory address to read from (network byte order on the wire).
+ * @param data_length - number of bytes to read, sent as the data_count field.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_memory_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_MEMORY, 2);
+
+    /* Address */
+    request->parameters.read_memory_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_memory_request.address));
+    request->parameters.read_memory_request.address = BYTE_ORDER__htonl(address);
+
+    /* Data count */
+    request->parameters.read_memory_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_memory_request.data_count));
+    request->parameters.read_memory_request.data_count = BYTE_ORDER__htonl(data_length);
+
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs an OPEN_STREAM control request (2 parameters).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (base header + payload struct).
+ * @param sequence - protocol sequence number for the request header.
+ * @param dataflow_manager_id - dataflow manager to open the stream on (single byte, no swap).
+ * @param is_input - stream direction flag (single byte, no swap).
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_open_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    const size_t packed_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__open_stream_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_OPEN_STREAM, 2);
+
+    /* dataflow_manager_id */
+    request->parameters.open_stream_request.dataflow_manager_id_length = BYTE_ORDER__htonl(sizeof(request->parameters.open_stream_request.dataflow_manager_id));
+    request->parameters.open_stream_request.dataflow_manager_id = dataflow_manager_id;
+
+    /* is_input */
+    request->parameters.open_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.open_stream_request.is_input));
+    request->parameters.open_stream_request.is_input = is_input;
+
+    *request_size = packed_size;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/**
+ * Packs a CLOSE_STREAM control request (2 parameters).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (base header + payload struct).
+ * @param sequence - protocol sequence number for the request header.
+ * @param dataflow_manager_id - dataflow manager whose stream is closed (single byte, no swap).
+ * @param is_input - stream direction flag (single byte, no swap).
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_close_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    const size_t packed_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__close_stream_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CLOSE_STREAM, 2);
+
+    /* dataflow_manager_id */
+    request->parameters.close_stream_request.dataflow_manager_id_length = BYTE_ORDER__htonl(sizeof(request->parameters.close_stream_request.dataflow_manager_id));
+    request->parameters.close_stream_request.dataflow_manager_id = dataflow_manager_id;
+
+    /* is_input */
+    request->parameters.close_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.close_stream_request.is_input));
+    request->parameters.close_stream_request.is_input = is_input;
+
+    *request_size = packed_size;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/**
+ * Packs the fields common to every CONFIG_STREAM request variant (stream index,
+ * direction, communication type, power mode, NN stream config). The
+ * communication-type-specific union members are filled in by the callers
+ * (UDP/MIPI/PCIe packers below).
+ * Multi-byte fields are converted to network byte order; single-byte fields are copied as-is.
+ */
+HAILO_COMMON_STATUS_t control_protocol__pack_config_stream_base_request(CONTROL_PROTOCOL__request_t *request, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    /* Callers validate these too, but check defensively since this helper dereferences both. */
+    CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+    CHECK_NOT_NULL(params, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+    /* stream index */
+    request->parameters.config_stream_request.stream_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.stream_index));
+    request->parameters.config_stream_request.stream_index = params->stream_index;
+
+    /* is_input */
+    request->parameters.config_stream_request.is_input_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.is_input));
+    request->parameters.config_stream_request.is_input = params->is_input;
+
+    /* communication_type */
+    request->parameters.config_stream_request.communication_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.communication_type));
+    request->parameters.config_stream_request.communication_type = BYTE_ORDER__htonl(params->communication_type);
+
+    /* skip_nn_stream_config */
+    request->parameters.config_stream_request.skip_nn_stream_config_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.skip_nn_stream_config));
+    request->parameters.config_stream_request.skip_nn_stream_config = params->skip_nn_stream_config;
+
+    /* power_mode */
+    request->parameters.config_stream_request.power_mode_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.power_mode));
+    request->parameters.config_stream_request.power_mode = params->power_mode;
+
+    /* nn_stream_config - 16-bit fields, swapped with htons */
+    request->parameters.config_stream_request.nn_stream_config_length = BYTE_ORDER__htonl(sizeof(request->parameters.config_stream_request.nn_stream_config));
+    request->parameters.config_stream_request.nn_stream_config.core_bytes_per_buffer = BYTE_ORDER__htons(params->nn_stream_config.core_bytes_per_buffer);
+    request->parameters.config_stream_request.nn_stream_config.core_buffers_per_frame = BYTE_ORDER__htons(params->nn_stream_config.core_buffers_per_frame);
+    request->parameters.config_stream_request.nn_stream_config.periph_bytes_per_buffer = BYTE_ORDER__htons(params->nn_stream_config.periph_bytes_per_buffer);
+    request->parameters.config_stream_request.nn_stream_config.periph_buffers_per_frame = BYTE_ORDER__htons(params->nn_stream_config.periph_buffers_per_frame);
+    request->parameters.config_stream_request.nn_stream_config.feature_padding_payload = BYTE_ORDER__htons(params->nn_stream_config.feature_padding_payload);
+    request->parameters.config_stream_request.nn_stream_config.buffer_padding_payload = BYTE_ORDER__htons(params->nn_stream_config.buffer_padding_payload);
+    request->parameters.config_stream_request.nn_stream_config.buffer_padding = BYTE_ORDER__htons(params->nn_stream_config.buffer_padding);
+
+    /* Previous version had 'status = SUCCESS; goto exit; exit: return status;' -
+     * the goto targeted the very next statement, so return directly. */
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a UDP input stream (7 parameters).
+ * Packs the shared base fields, then the udp_input member of the
+ * communication-params union (ports and sync settings in network byte order).
+ * The packed size counts only the udp_input variant, not the whole union.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__udp_input_config_params_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.udp_input));
+    request->parameters.config_stream_request.communication_params.udp_input.listening_port = BYTE_ORDER__htons(params->communication_params.udp_input.listening_port);
+
+    /* Sync settings - single-byte flags copied as-is, multi-byte fields swapped. */
+    request->parameters.config_stream_request.communication_params.udp_input.sync.should_sync = params->communication_params.udp_input.sync.should_sync;
+    request->parameters.config_stream_request.communication_params.udp_input.sync.frames_per_sync = BYTE_ORDER__htonl(params->communication_params.udp_input.sync.frames_per_sync);
+    request->parameters.config_stream_request.communication_params.udp_input.sync.packets_per_frame = BYTE_ORDER__htonl(params->communication_params.udp_input.sync.packets_per_frame);
+    request->parameters.config_stream_request.communication_params.udp_input.sync.sync_size = BYTE_ORDER__htons(params->communication_params.udp_input.sync.sync_size);
+
+    request->parameters.config_stream_request.communication_params.udp_input.buffers_threshold = BYTE_ORDER__htonl(params->communication_params.udp_input.buffers_threshold);
+    request->parameters.config_stream_request.communication_params.udp_input.use_rtp = params->communication_params.udp_input.use_rtp;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a UDP output stream (7 parameters).
+ * Packs the shared base fields, then the udp_output member of the
+ * communication-params union. The packed size counts only the udp_output
+ * variant, not the whole union.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__udp_output_config_params_t);
+
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    /* Ports and thresholds in network byte order; single-byte flags copied as-is. */
+    request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.udp_output));
+    request->parameters.config_stream_request.communication_params.udp_output.host_udp_port = BYTE_ORDER__htons(params->communication_params.udp_output.host_udp_port);
+    request->parameters.config_stream_request.communication_params.udp_output.chip_udp_port = BYTE_ORDER__htons(params->communication_params.udp_output.chip_udp_port);
+    request->parameters.config_stream_request.communication_params.udp_output.max_udp_payload_size = BYTE_ORDER__htons(params->communication_params.udp_output.max_udp_payload_size);
+    request->parameters.config_stream_request.communication_params.udp_output.should_send_sync_packets = params->communication_params.udp_output.should_send_sync_packets;
+    request->parameters.config_stream_request.communication_params.udp_output.buffers_threshold = BYTE_ORDER__htonl(params->communication_params.udp_output.buffers_threshold);
+    request->parameters.config_stream_request.communication_params.udp_output.use_rtp = params->communication_params.udp_output.use_rtp;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a MIPI input stream (7 parameters).
+ * Packs the shared base fields, then the mipi_input member of the
+ * communication-params union (MIPI RX common params plus ISP params).
+ * Only data_rate is multi-byte here and gets byte-swapped; the remaining
+ * fields are copied without conversion.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    /* Calculate the size of the exact mipi_input configuration struct instead of the entire union */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__mipi_input_config_params_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    /* MIPI RX common params */
+    request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.mipi_input));
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.data_type = params->communication_params.mipi_input.common_params.data_type;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.pixels_per_clock = params->communication_params.mipi_input.common_params.pixels_per_clock;
+    request->parameters.config_stream_request.communication_params.mipi_input.mipi_rx_id = params->communication_params.mipi_input.mipi_rx_id;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.number_of_lanes = params->communication_params.mipi_input.common_params.number_of_lanes;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.clock_selection = params->communication_params.mipi_input.common_params.clock_selection;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.data_rate = BYTE_ORDER__htonl(params->communication_params.mipi_input.common_params.data_rate);
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.virtual_channel_index = params->communication_params.mipi_input.common_params.virtual_channel_index;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.img_width_pixels = params->communication_params.mipi_input.common_params.img_width_pixels;
+    request->parameters.config_stream_request.communication_params.mipi_input.common_params.img_height_pixels = params->communication_params.mipi_input.common_params.img_height_pixels;
+    /* ISP params - copied field by field without byte-order conversion */
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_enable = params->communication_params.mipi_input.isp_params.isp_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_img_in_order = params->communication_params.mipi_input.isp_params.isp_img_in_order;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_img_out_data_type = params->communication_params.mipi_input.isp_params.isp_img_out_data_type;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_enable = params->communication_params.mipi_input.isp_params.isp_crop_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_width_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_width_pixels;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_height_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_height_pixels;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_width_start_offset_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_width_start_offset_pixels;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_crop_output_height_start_offset_pixels = params->communication_params.mipi_input.isp_params.isp_crop_output_height_start_offset_pixels;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_test_pattern_enable = params->communication_params.mipi_input.isp_params.isp_test_pattern_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_configuration_bypass = params->communication_params.mipi_input.isp_params.isp_configuration_bypass;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_ae_enable = params->communication_params.mipi_input.isp_params.isp_run_time_ae_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_awb_enable = params->communication_params.mipi_input.isp_params.isp_run_time_awb_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_adt_enable = params->communication_params.mipi_input.isp_params.isp_run_time_adt_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_af_enable = params->communication_params.mipi_input.isp_params.isp_run_time_af_enable;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_run_time_calculations_interval_ms = params->communication_params.mipi_input.isp_params.isp_run_time_calculations_interval_ms;
+    request->parameters.config_stream_request.communication_params.mipi_input.isp_params.isp_light_frequency = params->communication_params.mipi_input.isp_params.isp_light_frequency;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a MIPI output stream (7 parameters).
+ * Packs the shared base fields, then the mipi_output member of the
+ * communication-params union. Only data_rate is byte-swapped; the remaining
+ * fields are copied without conversion.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    /* Calculate the size of the exact mipi_output configuration struct instead of the entire union */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t) - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__mipi_output_config_params_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    request->parameters.config_stream_request.communication_params_length = BYTE_ORDER__htonl(sizeof(params->communication_params.mipi_output));
+    request->parameters.config_stream_request.communication_params.mipi_output.fifo_threshold_percent = params->communication_params.mipi_output.fifo_threshold_percent;
+    request->parameters.config_stream_request.communication_params.mipi_output.mipi_tx_id = params->communication_params.mipi_output.mipi_tx_id;
+    request->parameters.config_stream_request.communication_params.mipi_output.deskew_enable = params->communication_params.mipi_output.deskew_enable;
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.data_rate = BYTE_ORDER__htonl(params->communication_params.mipi_output.common_params.data_rate);
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.clock_selection = params->communication_params.mipi_output.common_params.clock_selection;
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.data_type = params->communication_params.mipi_output.common_params.data_type;
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.number_of_lanes = params->communication_params.mipi_output.common_params.number_of_lanes;
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.pixels_per_clock = params->communication_params.mipi_output.common_params.pixels_per_clock;
+    request->parameters.config_stream_request.communication_params.mipi_output.common_params.virtual_channel_index = params->communication_params.mipi_output.common_params.virtual_channel_index;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a PCIe input stream (7 parameters).
+ * Packs the shared base fields, then the pcie_input member of the
+ * communication-params union (channel index and dataflow type, single-byte
+ * fields copied as-is).
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t)
+        - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__pcie_input_config_params_t);
+
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    request->parameters.config_stream_request.communication_params_length =
+        BYTE_ORDER__htonl(sizeof(params->communication_params.pcie_input));
+    request->parameters.config_stream_request.communication_params.pcie_input.pcie_channel_index =
+        params->communication_params.pcie_input.pcie_channel_index;
+    request->parameters.config_stream_request.communication_params.pcie_input.pcie_dataflow_type =
+        params->communication_params.pcie_input.pcie_dataflow_type;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CONFIG_STREAM request for a PCIe output stream (7 parameters).
+ * Packs the shared base fields, then the pcie_output member of the
+ * communication-params union.
+ * NOTE(review): desc_page_size is copied without a byte-order swap here,
+ * unlike other multi-byte wire fields in this file — presumably it is a
+ * single-byte or host-order field; confirm against the struct definition.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_stream_request_t)
+        - sizeof(CONTROL_PROTOCOL__communication_config_prams_t) + sizeof(CONTROL_PROTOCOL__pcie_output_config_params_t);
+
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_STREAM, 7);
+
+    status = control_protocol__pack_config_stream_base_request(request, params);
+    if (HAILO_COMMON_STATUS__SUCCESS != status) {
+        goto exit;
+    }
+
+    request->parameters.config_stream_request.communication_params_length =
+        BYTE_ORDER__htonl(sizeof(params->communication_params.pcie_output));
+    request->parameters.config_stream_request.communication_params.pcie_output.pcie_channel_index =
+        params->communication_params.pcie_output.pcie_channel_index;
+    request->parameters.config_stream_request.communication_params.pcie_output.desc_page_size =
+        params->communication_params.pcie_output.desc_page_size;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a RESET control request (1 parameter).
+ * @param reset_type - reset level/type, sent as a 32-bit value in network byte order.
+ * Note: 'reset_resquest' (sic) is the field name as declared in the shared
+ * protocol struct; it cannot be renamed here without changing that header.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__reset_type_t reset_type)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__reset_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_RESET, 1);
+
+    /* reset_type */
+    request->parameters.reset_resquest.reset_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.reset_resquest.reset_type));
+    request->parameters.reset_resquest.reset_type = BYTE_ORDER__htonl((uint32_t)reset_type);
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a POWER_MEASUREMENT control request (2 parameters: dvm, measurement type).
+ * @param request - output request buffer to fill; must not be NULL.
+ * @param request_size - receives the total packed size (base header + payload struct).
+ * @param sequence - protocol sequence number for the request header.
+ * @param dvm - DVM option selecting what to measure; sent as uint32 in network byte order.
+ * @param measurement_type - measurement type; sent as uint32 in network byte order.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__power_measurement_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_POWER_MEASUEMENT, 2);
+
+    /* dvm */
+    /* Bugfix: the length field must describe the 'dvm' payload field, not the
+     * length field itself (sizeof(dvm_length) was used before). Matches the
+     * pattern used by CONTROL_PROTOCOL__pack_set_power_measurement_request. */
+    request->parameters.measure_power_request.dvm_length = BYTE_ORDER__htonl(sizeof(request->parameters.measure_power_request.dvm));
+    request->parameters.measure_power_request.dvm = BYTE_ORDER__htonl((uint32_t)dvm);
+
+    /* measurement_type */
+    request->parameters.measure_power_request.measurement_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.measure_power_request.measurement_type));
+    request->parameters.measure_power_request.measurement_type = BYTE_ORDER__htonl((uint32_t)measurement_type);
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a SET_POWER_MEASUREMENT control request (3 parameters: index, dvm, type),
+ * configuring one slot of the periodic power-measurement machinery.
+ * All three payload fields are sent as uint32 in network byte order.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_power_measurement_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_POWER_MEASUEMENT, 3);
+
+    /* index */
+    request->parameters.set_measure_power_request.index_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.set_measure_power_request.index));
+    request->parameters.set_measure_power_request.index = BYTE_ORDER__htonl(index);
+
+    /* dvm */
+    request->parameters.set_measure_power_request.dvm_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.set_measure_power_request.dvm));
+    request->parameters.set_measure_power_request.dvm = BYTE_ORDER__htonl((uint32_t)dvm);
+
+
+    /* measurement_type */
+    request->parameters.set_measure_power_request.measurement_type_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.set_measure_power_request.measurement_type));
+    request->parameters.set_measure_power_request.measurement_type = BYTE_ORDER__htonl((uint32_t)measurement_type);
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a GET_POWER_MEASUREMENT control request (2 parameters: index, should_clear).
+ * @param index - measurement slot to read; sent in network byte order.
+ * @param should_clear - whether the device should clear the accumulated
+ *        measurement after reporting it (single byte, no swap).
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, bool should_clear)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__get_power_measurement_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_POWER_MEASUEMENT, 2);
+
+    /* index */
+    request->parameters.get_measure_power_request.index_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.get_measure_power_request.index));
+    request->parameters.get_measure_power_request.index = BYTE_ORDER__htonl(index);
+
+    /* should_clear */
+    request->parameters.get_measure_power_request.should_clear_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.get_measure_power_request.should_clear));
+    request->parameters.get_measure_power_request.should_clear = (uint8_t)should_clear;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a START_POWER_MEASUREMENT control request (3 parameters).
+ * @param delay_milliseconds - delay between periodic measurements; uint32, network byte order.
+ * @param averaging_factor - sensor averaging factor enum, narrowed to uint16 for the wire.
+ * @param sampling_period - sensor sampling period enum, narrowed to uint16 for the wire.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t delay_milliseconds, CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+    uint16_t local_averaging_factor = 0;
+    uint16_t local_sampling_period = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Enums are narrowed to the 16-bit wire representation before swapping. */
+    local_averaging_factor = ((uint16_t)(averaging_factor));
+    local_sampling_period = ((uint16_t)(sampling_period));
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__start_power_measurement_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_START_POWER_MEASUEMENT, 3);
+
+    /* delay_milliseconds */
+    request->parameters.start_measure_power_request.delay_milliseconds_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.start_measure_power_request.delay_milliseconds));
+    request->parameters.start_measure_power_request.delay_milliseconds = BYTE_ORDER__htonl(delay_milliseconds);
+
+    /* averaging_factor */
+    request->parameters.start_measure_power_request.averaging_factor_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.start_measure_power_request.averaging_factor));
+    request->parameters.start_measure_power_request.averaging_factor = BYTE_ORDER__htons(local_averaging_factor);
+
+    /* sampling_period */
+    request->parameters.start_measure_power_request.sampling_period_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.start_measure_power_request.sampling_period));
+    request->parameters.start_measure_power_request.sampling_period = BYTE_ORDER__htons(local_sampling_period);
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/* Builds an I2C_WRITE control request (7 parameters).
+ * Each field is packed as a <field>_length (network order) plus the value;
+ * multi-byte values are byte-swapped with BYTE_ORDER__hton*.
+ * `data` (`length` bytes) is copied into the trailing flexible buffer.
+ * NOTE(review): `length` is not validated against the request buffer
+ * capacity before the memcpy — presumably callers bound it; confirm.
+ * Returns SUCCESS, or NULL_ARGUMENT_PASSED on a NULL request/request_size/data. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_write_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
+ uint32_t sequence, uint32_t register_address, uint8_t endianness, uint16_t slave_address,
+ uint8_t register_address_size, uint8_t bus_index, const uint8_t *data, uint32_t length)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header: total size includes the variable-length data after the fixed struct */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__i2c_write_request_t) + length;
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_I2C_WRITE, 7);
+
+ /* register_address — note: this `register_address_size` member is the wire
+ length field of register_address, distinct from
+ slave_config.register_address_size below */
+ request->parameters.i2c_write_request.register_address_size = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.register_address));
+ request->parameters.i2c_write_request.register_address = BYTE_ORDER__htonl(register_address);
+
+ /* endianness (single byte, no swap) */
+ request->parameters.i2c_write_request.slave_config.endianness_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.slave_config.endianness));
+ request->parameters.i2c_write_request.slave_config.endianness = endianness;
+
+ /* slave_address */
+ request->parameters.i2c_write_request.slave_config.slave_address_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.slave_config.slave_address));
+ request->parameters.i2c_write_request.slave_config.slave_address = BYTE_ORDER__htons(slave_address);
+
+ /* register_address_size (single byte, no swap) */
+ request->parameters.i2c_write_request.slave_config.register_address_size_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.slave_config.register_address_size));
+ request->parameters.i2c_write_request.slave_config.register_address_size = register_address_size;
+
+ /* bus_index (single byte, no swap) */
+ request->parameters.i2c_write_request.slave_config.bus_index_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.slave_config.bus_index));
+ request->parameters.i2c_write_request.slave_config.bus_index = bus_index;
+
+ /* Data — copied verbatim into the trailing buffer */
+ request->parameters.i2c_write_request.data_length = BYTE_ORDER__htonl(length);
+ memcpy(&(request->parameters.i2c_write_request.data), data, length);
+
+ /* should_hold_bus — this packer always sends false (the read packer exposes
+ it as a parameter instead) */
+ request->parameters.i2c_write_request.slave_config.should_hold_bus_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_write_request.slave_config.should_hold_bus));
+ request->parameters.i2c_write_request.slave_config.should_hold_bus = false;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+
+/* Builds an I2C_READ control request (7 parameters).
+ * `length` is the number of bytes the device should read back; no payload
+ * follows the fixed struct, so the request size is just base + struct size.
+ * Returns SUCCESS, or NULL_ARGUMENT_PASSED on a NULL request/request_size. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
+ uint32_t sequence, uint32_t register_address, uint8_t endianness,
+ uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint32_t length, bool should_hold_bus)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__i2c_read_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_I2C_READ, 7);
+
+ /* data_length — how many bytes to read from the slave */
+ request->parameters.i2c_read_request.data_length_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.data_length));
+ request->parameters.i2c_read_request.data_length = BYTE_ORDER__htonl(length);
+
+ /* register_address — note: `register_address_size` here is the wire length
+ field of register_address, distinct from slave_config.register_address_size */
+ request->parameters.i2c_read_request.register_address_size = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.register_address));
+ request->parameters.i2c_read_request.register_address = BYTE_ORDER__htonl(register_address);
+
+ /* endianness (single byte, no swap) */
+ request->parameters.i2c_read_request.slave_config.endianness_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.slave_config.endianness));
+ request->parameters.i2c_read_request.slave_config.endianness = endianness;
+
+ /* slave_address */
+ request->parameters.i2c_read_request.slave_config.slave_address_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.slave_config.slave_address));
+ request->parameters.i2c_read_request.slave_config.slave_address = BYTE_ORDER__htons(slave_address);
+
+ /* register_address_size (single byte, no swap) */
+ request->parameters.i2c_read_request.slave_config.register_address_size_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.slave_config.register_address_size));
+ request->parameters.i2c_read_request.slave_config.register_address_size = register_address_size;
+
+ /* bus_index (single byte, no swap) */
+ request->parameters.i2c_read_request.slave_config.bus_index_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.slave_config.bus_index));
+ request->parameters.i2c_read_request.slave_config.bus_index = bus_index;
+
+ /* should_hold_bus — caller-controlled here, unlike the write packer */
+ request->parameters.i2c_read_request.slave_config.should_hold_bus_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.i2c_read_request.slave_config.should_hold_bus));
+ request->parameters.i2c_read_request.slave_config.should_hold_bus = should_hold_bus;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+/* Builds a parameter-less STOP_POWER_MEASUREMENT request by delegating to
+ * the shared empty-request packer. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_stop_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_STOP_POWER_MEASUEMENT);
+}
+
+/* Builds a CONFIG_CORE_TOP control request with 2 parameters: the config
+ * type and the opaque config params blob (copied verbatim).
+ * Returns NULL_ARGUMENT_PASSED when request, request_size or params is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_core_top_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_core_top_type_t config_type, CONTROL_PROTOCOL__config_core_top_params_t *params)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == params)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CORE_TOP, 2);
+
+    /* config_type (network order) */
+    request->parameters.config_core_top_request.config_type = BYTE_ORDER__htonl(config_type);
+    request->parameters.config_core_top_request.config_type_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.config_core_top_request.config_type));
+
+    /* params blob — size is bounded by the destination member, not the caller */
+    request->parameters.config_core_top_request.config_params_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.config_core_top_request.config_params));
+    (void)memcpy(&request->parameters.config_core_top_request.config_params,
+        params,
+        sizeof(request->parameters.config_core_top_request.config_params));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__config_core_top_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a PHY_OPERATION control request carrying a single parameter:
+ * the requested operation type.
+ * Returns NULL_ARGUMENT_PASSED when request or request_size is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_phy_operation_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__phy_operation_t operation_type)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_PHY_OPERATION, 1);
+
+    /* operation_type (enum widened to 32 bits, network order) */
+    request->parameters.phy_operation_request.operation_type = BYTE_ORDER__htonl((uint32_t)operation_type);
+    request->parameters.phy_operation_request.operation_type_length = BYTE_ORDER__htonl(
+        sizeof(request->parameters.phy_operation_request.operation_type));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__phy_operation_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a READ_USER_CONFIG request with 2 parameters: the flash address
+ * to read from and the number of bytes to read.
+ * Returns NULL_ARGUMENT_PASSED when request or request_size is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_USER_CONFIG, 2);
+
+    /* Address */
+    request->parameters.read_user_config_request.address = BYTE_ORDER__htonl(address);
+    request->parameters.read_user_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_user_config_request.address));
+
+    /* Data count */
+    request->parameters.read_user_config_request.data_count = BYTE_ORDER__htonl(data_length);
+    request->parameters.read_user_config_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_user_config_request.data_count));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_user_config_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a parameter-less EXAMINE_USER_CONFIG request.
+ * Delegates to the shared empty-request packer rather than open-coding the
+ * NULL checks / header / size logic, matching
+ * CONTROL_PROTOCOL__pack_stop_power_measurement_request. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_examine_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_EXAMINE_USER_CONFIG);
+}
+
+/* Builds a WRITE_USER_CONFIG request with 2 parameters: the destination
+ * flash address and a data blob copied into the trailing buffer.
+ * NOTE(review): data_length is not validated against the request buffer
+ * capacity — presumably callers bound it; confirm.
+ * Returns NULL_ARGUMENT_PASSED when request, request_size or data is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header — total size covers the variable-length payload as well */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_USER_CONFIG, 2);
+
+    /* Address */
+    request->parameters.write_user_config_request.address = BYTE_ORDER__htonl(address);
+    request->parameters.write_user_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_user_config_request.address));
+
+    /* Data */
+    request->parameters.write_user_config_request.data_length = BYTE_ORDER__htonl(data_length);
+    memcpy(&(request->parameters.write_user_config_request.data), data, data_length);
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_user_config_request_t) + data_length;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a parameter-less ERASE_USER_CONFIG request.
+ * Delegates to the shared empty-request packer rather than open-coding the
+ * NULL checks / header / size logic, matching
+ * CONTROL_PROTOCOL__pack_stop_power_measurement_request. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_erase_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_ERASE_USER_CONFIG);
+}
+
+
+/* Builds a parameter-less START_FIRMWARE_UPDATE request.
+ * Delegates to the shared empty-request packer rather than open-coding the
+ * NULL checks / header / size logic, matching
+ * CONTROL_PROTOCOL__pack_stop_power_measurement_request. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_START_FIRMWARE_UPDATE);
+}
+
+/* Builds a parameter-less FINISH_FIRMWARE_UPDATE request.
+ * Delegates to the shared empty-request packer rather than open-coding the
+ * NULL checks / header / size logic, matching
+ * CONTROL_PROTOCOL__pack_stop_power_measurement_request. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_finish_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_FINISH_FIRMWARE_UPDATE);
+}
+
+/* Builds a WRITE_FIRMWARE_UPDATE request with 2 parameters: the chunk
+ * offset within the firmware image and the chunk data itself.
+ * NOTE(review): data_length is not validated against the request buffer
+ * capacity — presumably callers bound it; confirm.
+ * Returns NULL_ARGUMENT_PASSED when request, request_size or data is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, const uint8_t *data, uint32_t data_length)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header — total size covers the variable-length chunk as well */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_FIRMWARE_UPDATE, 2);
+
+    /* offset */
+    request->parameters.write_firmware_update_request.offset = BYTE_ORDER__htonl(offset);
+    request->parameters.write_firmware_update_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_firmware_update_request.offset));
+
+    /* data */
+    request->parameters.write_firmware_update_request.data_length = BYTE_ORDER__htonl(data_length);
+    memcpy(&(request->parameters.write_firmware_update_request.data), data, data_length);
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_firmware_update_request_t) + data_length;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a WRITE_SECOND_STAGE_TO_INTERNAL_MEMORY request with 2 parameters:
+ * the chunk offset and the second-stage chunk data.
+ * NOTE(review): data_length is not validated against the request buffer
+ * capacity — presumably callers bound it; confirm.
+ * Returns NULL_ARGUMENT_PASSED when request, request_size or data is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, uint8_t *data, uint32_t data_length)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header — total size covers the variable-length chunk as well */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_SECOND_STAGE_TO_INTERNAL_MEMORY, 2);
+
+    /* offset */
+    request->parameters.write_second_stage_to_internal_memory_request.offset = BYTE_ORDER__htonl(offset);
+    request->parameters.write_second_stage_to_internal_memory_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_second_stage_to_internal_memory_request.offset));
+
+    /* data */
+    request->parameters.write_second_stage_to_internal_memory_request.data_length = BYTE_ORDER__htonl(data_length);
+    memcpy(&(request->parameters.write_second_stage_to_internal_memory_request.data), data, data_length);
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request_t) + data_length;
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a COPY_SECOND_STAGE_TO_FLASH request with 2 parameters: the
+ * expected MD5 of the second-stage image (raw bytes, no byte-order
+ * conversion) and the image size.
+ * Fix: expected_md5 was dereferenced without a NULL check, unlike every
+ * other pointer argument in this file — a NULL would crash the packer.
+ * Returns NULL_ARGUMENT_PASSED when any pointer argument is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__copy_second_stage_to_flash_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t second_stage_size)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == expected_md5)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_COPY_SECOND_STAGE_TO_FLASH, 2);
+
+    /* expected md5 — copied as raw bytes, bounded by the destination member */
+    request->parameters.copy_second_stage_to_flash_request.expected_md5_length = BYTE_ORDER__htonl(sizeof(request->parameters.copy_second_stage_to_flash_request.expected_md5));
+    memcpy(&(request->parameters.copy_second_stage_to_flash_request.expected_md5),
+        *expected_md5,
+        sizeof(request->parameters.copy_second_stage_to_flash_request.expected_md5));
+
+    /* second_stage_size */
+    request->parameters.copy_second_stage_to_flash_request.second_stage_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.copy_second_stage_to_flash_request.second_stage_size));
+    request->parameters.copy_second_stage_to_flash_request.second_stage_size = BYTE_ORDER__htonl(second_stage_size);
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__copy_second_stage_to_flash_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a VALIDATE_FIRMWARE_UPDATE request with 2 parameters: the expected
+ * MD5 of the written firmware (raw bytes, no byte-order conversion) and the
+ * firmware size.
+ * Fix: expected_md5 was dereferenced without a NULL check, unlike every
+ * other pointer argument in this file — a NULL would crash the packer.
+ * Returns NULL_ARGUMENT_PASSED when any pointer argument is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_validate_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t firmware_size)
+{
+    if ((NULL == request) || (NULL == request_size) || (NULL == expected_md5)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_VALIDATE_FIRMWARE_UPDATE, 2);
+
+    /* expected md5 — copied as raw bytes, bounded by the destination member */
+    request->parameters.validate_firmware_update_request.expected_md5_length = BYTE_ORDER__htonl(sizeof(request->parameters.validate_firmware_update_request.expected_md5));
+    memcpy(&(request->parameters.validate_firmware_update_request.expected_md5),
+        *expected_md5,
+        sizeof(request->parameters.validate_firmware_update_request.expected_md5));
+
+    /* firmware_size */
+    request->parameters.validate_firmware_update_request.firmware_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.validate_firmware_update_request.firmware_size));
+    request->parameters.validate_firmware_update_request.firmware_size = BYTE_ORDER__htonl(firmware_size);
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__validate_firmware_update_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds an NN_CORE_LATENCY_MEASUREMENT_CONFIG request with 5 parameters:
+ * the enable flag, the inbound start / outbound stop buffer numbers, and
+ * the inbound / outbound stream indices.
+ * Returns SUCCESS, or NULL_ARGUMENT_PASSED on a NULL request/request_size. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t latency_measurement_en, uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index, uint32_t outbound_stream_index)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__latency_config_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_NN_CORE_LATENCY_MEASUREMENT_CONFIG, 5);
+
+ /* latency_measurement_en (single byte, no byte swap) */
+ request->parameters.latency_config_request.latency_measurement_en_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.latency_config_request.latency_measurement_en));
+ request->parameters.latency_config_request.latency_measurement_en = latency_measurement_en;
+
+ /* inbound_start_buffer_number */
+ request->parameters.latency_config_request.inbound_start_buffer_number_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.latency_config_request.inbound_start_buffer_number));
+ request->parameters.latency_config_request.inbound_start_buffer_number = BYTE_ORDER__htonl(inbound_start_buffer_number);
+
+ /* outbound_stop_buffer_number */
+ request->parameters.latency_config_request.outbound_stop_buffer_number_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.latency_config_request.outbound_stop_buffer_number));
+ request->parameters.latency_config_request.outbound_stop_buffer_number = BYTE_ORDER__htonl(outbound_stop_buffer_number);
+
+ /* inbound_stream_index */
+ request->parameters.latency_config_request.inbound_stream_index_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.latency_config_request.inbound_stream_index));
+ request->parameters.latency_config_request.inbound_stream_index = BYTE_ORDER__htonl(inbound_stream_index);
+
+ /* outbound_stream_index */
+ request->parameters.latency_config_request.outbound_stream_index_length = BYTE_ORDER__htonl(
+ sizeof(request->parameters.latency_config_request.outbound_stream_index));
+ request->parameters.latency_config_request.outbound_stream_index = BYTE_ORDER__htonl(outbound_stream_index);
+
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+/* Builds a parameter-less NN_CORE_LATENCY_MEASUREMENT_READ request.
+ * Delegates to the shared empty-request packer rather than open-coding the
+ * NULL checks / header / size logic, matching
+ * CONTROL_PROTOCOL__pack_stop_power_measurement_request. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{
+    return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_NN_CORE_LATENCY_MEASUREMENT_READ);
+}
+
+
+/* Builds a SENSOR_STORE_CONFIG request (11 parameters) carrying one chunk of
+ * a sensor configuration plus its metadata; `data` is copied into the
+ * trailing flexible buffer.
+ * Fixes:
+ *  - config_name was memcpy'd without a NULL check (crash on NULL);
+ *  - the wire length of config_name is always MAX_CONFIG_NAME_LEN, but only
+ *    config_name_length bytes were written, so the remaining bytes of the
+ *    name field were sent uninitialized — zero-fill before copying;
+ *  - config_name_length is now validated before the request is mutated, so
+ *    a failed call no longer leaves a partially packed request.
+ * NOTE(review): data_length is not validated against the request buffer
+ * capacity — presumably callers bound it; confirm.
+ * Returns SUCCESS, NULL_ARGUMENT_PASSED on NULL pointers, or
+ * INVALID_ARGUMENT when config_name_length exceeds MAX_CONFIG_NAME_LEN. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_store_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t is_first, uint32_t section_index,
+ uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size,
+ uint8_t *data, uint32_t data_length, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+ uint32_t config_name_length, uint8_t *config_name)
+
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size) || (NULL == data) || (NULL == config_name)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Validate before mutating the request so callers never see a
+       half-packed buffer on failure */
+    if (config_name_length > MAX_CONFIG_NAME_LEN) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_store_config_request_t) + data_length;
+
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_STORE_CONFIG, 11);
+
+    /* section index */
+    request->parameters.sensor_store_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.section_index));
+    request->parameters.sensor_store_config_request.section_index = BYTE_ORDER__htonl(section_index);
+
+    /* is_first */
+    request->parameters.sensor_store_config_request.is_first_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.is_first));
+    request->parameters.sensor_store_config_request.is_first = BYTE_ORDER__htonl(is_first);
+
+    /* start_offset */
+    request->parameters.sensor_store_config_request.start_offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.start_offset));
+    request->parameters.sensor_store_config_request.start_offset = BYTE_ORDER__htonl(start_offset);
+
+    /* reset_data_size */
+    request->parameters.sensor_store_config_request.reset_data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.reset_data_size));
+    request->parameters.sensor_store_config_request.reset_data_size = BYTE_ORDER__htonl(reset_data_size);
+
+    /* sensor_type */
+    request->parameters.sensor_store_config_request.sensor_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.sensor_type));
+    request->parameters.sensor_store_config_request.sensor_type = BYTE_ORDER__htonl(sensor_type);
+
+    /* total_data_size */
+    request->parameters.sensor_store_config_request.total_data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.total_data_size));
+    request->parameters.sensor_store_config_request.total_data_size = BYTE_ORDER__htonl(total_data_size);
+
+    /* config_width */
+    request->parameters.sensor_store_config_request.config_width_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_width));
+    request->parameters.sensor_store_config_request.config_width = BYTE_ORDER__htons(config_width);
+
+    /* config_height */
+    request->parameters.sensor_store_config_request.config_height_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_height));
+    request->parameters.sensor_store_config_request.config_height = BYTE_ORDER__htons(config_height);
+
+    /* config_fps */
+    request->parameters.sensor_store_config_request.config_fps_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_store_config_request.config_fps));
+    request->parameters.sensor_store_config_request.config_fps = BYTE_ORDER__htons(config_fps);
+
+    /* Config_name — the wire length is always MAX_CONFIG_NAME_LEN, so zero the
+       whole field first to avoid sending uninitialized trailing bytes */
+    request->parameters.sensor_store_config_request.config_name_length = BYTE_ORDER__htonl(MAX_CONFIG_NAME_LEN);
+    memset(&(request->parameters.sensor_store_config_request.config_name), 0,
+        sizeof(request->parameters.sensor_store_config_request.config_name));
+    memcpy(&(request->parameters.sensor_store_config_request.config_name), config_name, config_name_length);
+
+    /* Data */
+    request->parameters.sensor_store_config_request.data_length = BYTE_ORDER__htonl(data_length);
+    memcpy(&(request->parameters.sensor_store_config_request.data), data, data_length);
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/* Builds a SENSOR_GET_CONFIG request with 3 parameters: section index,
+ * offset within the section, and the number of bytes to read back.
+ * Returns NULL_ARGUMENT_PASSED when request or request_size is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint32_t section_index, uint32_t offset, uint32_t data_length)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_GET_CONFIG, 3);
+
+    /* section_index */
+    request->parameters.sensor_get_config_request.section_index = BYTE_ORDER__htonl(section_index);
+    request->parameters.sensor_get_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.section_index));
+
+    /* offset */
+    request->parameters.sensor_get_config_request.offset = BYTE_ORDER__htonl(offset);
+    request->parameters.sensor_get_config_request.offset_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.offset));
+
+    /* Data count */
+    request->parameters.sensor_get_config_request.data_size = BYTE_ORDER__htonl(data_length);
+    request->parameters.sensor_get_config_request.data_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_get_config_request.data_size));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_get_config_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a SENSOR_SET_I2C_BUS_INDEX request with 2 parameters: the sensor
+ * type and the I2C bus index to associate with it.
+ * Uses hailo_status + CHECK macros (newer style) rather than the
+ * HAILO_COMMON_STATUS_t/goto pattern used elsewhere in this file. */
+hailo_status CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t sensor_type, uint32_t bus_index)
+{
+    CHECK_ARG_NOT_NULL(request);
+    CHECK_ARG_NOT_NULL(request_size);
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_SET_I2C_BUS_INDEX, 2);
+
+    /* sensor type */
+    request->parameters.sensor_set_i2c_bus_index.sensor_type = BYTE_ORDER__htonl(sensor_type);
+    request->parameters.sensor_set_i2c_bus_index.sensor_type_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_i2c_bus_index.sensor_type));
+
+    /* bus_index */
+    request->parameters.sensor_set_i2c_bus_index.i2c_bus_index = BYTE_ORDER__htonl(bus_index);
+    request->parameters.sensor_set_i2c_bus_index.i2c_bus_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_i2c_bus_index.i2c_bus_index));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_set_i2c_bus_index_t);
+
+    return HAILO_SUCCESS;
+}
+
+/* Builds a SENSOR_LOAD_AND_START request with a single parameter: the
+ * configuration section index to load and start.
+ * Returns NULL_ARGUMENT_PASSED when request or request_size is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_LOAD_AND_START, 1);
+
+    /* section index */
+    request->parameters.sensor_load_config_request.section_index = BYTE_ORDER__htonl(section_index);
+    request->parameters.sensor_load_config_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_load_config_request.section_index));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_load_config_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/* Builds a SENSOR_RESET request with a single parameter: the configuration
+ * section index to reset.
+ * Returns NULL_ARGUMENT_PASSED when request or request_size is NULL. */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index)
+{
+    if ((NULL == request) || (NULL == request_size)) {
+        return HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+    }
+
+    /* Header */
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_RESET, 1);
+
+    /* section index */
+    request->parameters.sensor_reset_request.section_index = BYTE_ORDER__htonl(section_index);
+    request->parameters.sensor_reset_request.section_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_reset_request.section_index));
+
+    *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_reset_request_t);
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint16_t slave_address,
+ uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness)
+{ /* Packs a SENSOR_SET_GENERIC_I2C_SLAVE request (5 parameters describing the I2C slave); writes the total packed size to *request_size. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) ) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__sensor_set_generic_i2c_slave_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SENSOR_SET_GENERIC_I2C_SLAVE, 5); /* trailing 5 = parameter count */
+
+ /* slave_address */
+ request->parameters.sensor_set_generic_i2c_slave_request.slave_address_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.slave_address));
+ request->parameters.sensor_set_generic_i2c_slave_request.slave_address = BYTE_ORDER__htons(slave_address); /* 16-bit field -> htons */
+
+ /* register_address_size */
+ request->parameters.sensor_set_generic_i2c_slave_request.register_address_size_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.register_address_size));
+ request->parameters.sensor_set_generic_i2c_slave_request.register_address_size = register_address_size; /* single byte - no byte-order conversion needed */
+
+ /* bus index */
+ request->parameters.sensor_set_generic_i2c_slave_request.bus_index_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.bus_index));
+ request->parameters.sensor_set_generic_i2c_slave_request.bus_index = bus_index;
+
+ /* should_hold_bus */
+ request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus));
+ request->parameters.sensor_set_generic_i2c_slave_request.should_hold_bus = should_hold_bus;
+
+ /* endianness */
+ request->parameters.sensor_set_generic_i2c_slave_request.endianness_length = BYTE_ORDER__htonl(sizeof(request->parameters.sensor_set_generic_i2c_slave_request.endianness));
+ request->parameters.sensor_set_generic_i2c_slave_request.endianness = endianness;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{ /* Packs a parameterless SENSOR_GET_SECTIONS_INFO request; the helper also sets *request_size. */
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_SENSOR_GET_SECTIONS_INFO);
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ const CONTROL_PROTOCOL__application_header_t *network_group_header)
+{ /* Packs a CONTEXT_SWITCH_SET_NETWORK_GROUP_HEADER request; copies the caller's header struct verbatim into the request. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) || (NULL == network_group_header)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__context_switch_set_network_group_header_request_t);
+ control_protocol__pack_request_header(request, sequence,
+ HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_SET_NETWORK_GROUP_HEADER, 1); /* trailing 1 = parameter count */
+
+ /* application_header */
+ request->parameters.context_switch_set_network_group_header_request.application_header_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_network_group_header_request.application_header));
+ memcpy(&(request->parameters.context_switch_set_network_group_header_request.application_header),
+ network_group_header,
+ sizeof(request->parameters.context_switch_set_network_group_header_request.application_header)); /* fixed-size struct copy, no per-field byte swapping here */
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ const CONTROL_PROTOCOL__context_switch_context_info_single_control_t *context_info)
+{ /* Packs a CONTEXT_SWITCH_SET_CONTEXT_INFO request: 3 scalar flags plus a variable-length network-data payload. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) || (NULL == context_info)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__context_switch_set_context_info_request_t) + context_info->context_network_data_length; /* total size includes the trailing variable-length data */
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_SET_CONTEXT_INFO, 4);
+
+ /* is_first_control_per_context */
+ request->parameters.context_switch_set_context_info_request.is_first_control_per_context_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.is_first_control_per_context));
+ request->parameters.context_switch_set_context_info_request.is_first_control_per_context =
+ context_info->is_first_control_per_context;
+
+ /* is_last_control_per_context */
+ request->parameters.context_switch_set_context_info_request.is_last_control_per_context_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.is_last_control_per_context));
+ request->parameters.context_switch_set_context_info_request.is_last_control_per_context =
+ context_info->is_last_control_per_context;
+
+ /* context_type */
+ request->parameters.context_switch_set_context_info_request.context_type_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.context_switch_set_context_info_request.context_type));
+ request->parameters.context_switch_set_context_info_request.context_type =
+ context_info->context_type;
+
+ /* Network data (edge layers + Trigger groups) */
+ if (CONTROL_PROTOCOL__CONTEXT_NETWORK_DATA_SINGLE_CONTROL_MAX_SIZE < context_info->context_network_data_length) { /* reject oversized payload before the copy below */
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_BUFFER_SIZE;
+ goto exit;
+ }
+ request->parameters.context_switch_set_context_info_request.context_network_data_length =
+ BYTE_ORDER__htonl(context_info->context_network_data_length);
+ memcpy(&(request->parameters.context_switch_set_context_info_request.context_network_data),
+ &(context_info->context_network_data), context_info->context_network_data_length);
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence)
+{ /* Packs a parameterless IDLE_TIME_GET_MEASUREMENT request. (Function name keeps the historical "measuremment" spelling - public API.) */
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_IDLE_TIME_GET_MEASUREMENT);
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t measurement_enable)
+{ /* Packs an IDLE_TIME_SET_MEASUREMENT request carrying a single enable/disable flag. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__idle_time_set_measurement_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_IDLE_TIME_SET_MEASUREMENT, 1);
+
+ /* measurement_enable */
+ request->parameters.idle_time_set_measurement_request.measurement_enable_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.idle_time_set_measurement_request.measurement_enable));
+ request->parameters.idle_time_set_measurement_request.measurement_enable = measurement_enable;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_pause_frames_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size, uint32_t sequence, uint8_t rx_pause_frames_enable)
+{ /* Packs a SET_PAUSE_FRAMES request carrying a single rx-pause-frames enable flag. */
+
+ CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+ CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+ /* Header */
+ size_t local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__set_pause_frames_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_PAUSE_FRAMES, 1);
+
+ /* rx_pause_frames_enable */
+ request->parameters.set_pause_frames_request.rx_pause_frames_enable_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.set_pause_frames_request.rx_pause_frames_enable));
+ request->parameters.set_pause_frames_request.rx_pause_frames_enable = rx_pause_frames_enable;
+
+ *request_size = local_request_size;
+
+ return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_download_context_action_list_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size, uint32_t sequence, uint32_t network_group_id,
+ CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset)
+{ /* Packs a DOWNLOAD_CONTEXT_ACTION_LIST request (4 parameters identifying the context and list offset). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__download_context_action_list_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_DOWNLOAD_CONTEXT_ACTION_LIST, 4);
+
+ /* network_group_id */
+ request->parameters.download_context_action_list_request.network_group_id_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.network_group_id));
+ request->parameters.download_context_action_list_request.network_group_id = BYTE_ORDER__htonl(network_group_id);
+
+ /* context_type */
+ request->parameters.download_context_action_list_request.context_type_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.context_type));
+ request->parameters.download_context_action_list_request.context_type = static_cast<uint8_t>(context_type); /* enum narrowed to a single byte on the wire */
+
+ /* context_index */
+ request->parameters.download_context_action_list_request.context_index_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.context_index));
+ request->parameters.download_context_action_list_request.context_index = context_index;
+
+ /* action_list_offset */
+ request->parameters.download_context_action_list_request.action_list_offset_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.download_context_action_list_request.action_list_offset));
+ request->parameters.download_context_action_list_request.action_list_offset = BYTE_ORDER__htons(action_list_offset); /* 16-bit field -> htons */
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
+ uint16_t dynamic_batch_size, bool keep_nn_config_during_reset)
+{ /* Packs a CHANGE_CONTEXT_SWITCH_STATUS request (4 parameters controlling the device state machine). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__change_context_switch_status_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CHANGE_CONTEXT_SWITCH_STATUS, 4);
+
+ /* state_machine_status */
+ request->parameters.change_context_switch_status_request.state_machine_status_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.state_machine_status));
+ memcpy(&(request->parameters.change_context_switch_status_request.state_machine_status),
+ &(state_machine_status),
+ sizeof(request->parameters.change_context_switch_status_request.state_machine_status)); /* copies the enum raw - truncates to the field's size if smaller */
+
+ /* application_index */
+ request->parameters.change_context_switch_status_request.application_index_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.application_index));
+ request->parameters.change_context_switch_status_request.application_index = application_index;
+
+ /* dynamic_batch_size */
+ request->parameters.change_context_switch_status_request.dynamic_batch_size_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.dynamic_batch_size));
+ request->parameters.change_context_switch_status_request.dynamic_batch_size = dynamic_batch_size; /* NOTE(review): uint16 stored without BYTE_ORDER__htons, unlike action_list_offset/batch_index elsewhere - confirm firmware expects host order here */
+
+ /* keep_nn_config_during_reset */
+ request->parameters.change_context_switch_status_request.keep_nn_config_during_reset_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.keep_nn_config_during_reset));
+ request->parameters.change_context_switch_status_request.keep_nn_config_during_reset = keep_nn_config_during_reset;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_enable(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t cpu_id,
+ bool should_enable)
+{ /* Packs a watchdog enable/disable request, choosing the core- or app-CPU opcode from cpu_id (must be <= CPU_ID_CORE_CPU). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+ CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ if (CPU_ID_CORE_CPU < cpu_id){
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
+ goto exit;
+ }
+
+ opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_WD_ENABLE : HAILO_CONTROL_OPCODE_APP_WD_ENABLE; /* per-CPU watchdog opcode */
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__wd_enable_request_t);
+ control_protocol__pack_request_header(request, sequence, opcode, 1);
+
+ request->parameters.wd_enable_request.should_enable_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.wd_enable_request.should_enable));
+ request->parameters.wd_enable_request.should_enable = should_enable;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_config(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t cpu_id,
+ uint32_t wd_cycles,
+ CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode)
+{ /* Packs a watchdog configuration request (cycles + mode), choosing the core- or app-CPU opcode from cpu_id. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+ CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+ if (CPU_ID_CORE_CPU < cpu_id){
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
+ goto exit;
+ }
+
+ opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_WD_CONFIG : HAILO_CONTROL_OPCODE_APP_WD_CONFIG; /* per-CPU watchdog opcode */
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__wd_config_request_t);
+ control_protocol__pack_request_header(request, sequence, opcode, 2);
+
+ request->parameters.wd_config_request.wd_cycles_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.wd_config_request.wd_cycles));
+ request->parameters.wd_config_request.wd_cycles = BYTE_ORDER__htonl(wd_cycles);
+ request->parameters.wd_config_request.wd_mode_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.wd_config_request.wd_mode));
+ request->parameters.wd_config_request.wd_mode = static_cast<uint8_t>(wd_mode); /* enum narrowed to a single byte on the wire */
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence)
+{ /* Packs a parameterless CONTEXT_SWITCH_CLEAR_CONFIGURED_APPS request. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE; /* NOTE(review): presumably redundant - pack_empty_request below receives request_size as well; verify */
+ control_protocol__pack_empty_request(request, request_size, sequence,
+ HAILO_CONTROL_OPCODE_CONTEXT_SWITCH_CLEAR_CONFIGURED_APPS);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_previous_system_state(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t cpu_id)
+{ /* Packs a parameterless previous-system-state query, choosing the core- or app-CPU opcode from cpu_id. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+ CONTROL_PROTOCOL__OPCODE_t opcode = HAILO_CONTROL_OPCODE_COUNT;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+ if (CPU_ID_CORE_CPU < cpu_id){
+ status = HAILO_STATUS__CONTROL_PROTOCOL__INVALID_ARGUMENT;
+ goto exit;
+ }
+
+ opcode = (CPU_ID_CORE_CPU == cpu_id) ? HAILO_CONTROL_OPCODE_CORE_PREVIOUS_SYSTEM_STATE : HAILO_CONTROL_OPCODE_APP_PREVIOUS_SYSTEM_STATE;
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
+ *request_size = local_request_size; /* NOTE(review): presumably redundant - pack_empty_request below receives request_size as well; verify */
+ control_protocol__pack_empty_request(request, request_size, sequence, opcode);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t interrupt_type, uint8_t interrupt_index, uint8_t interrupt_sub_index)
+{ /* Packs a SET_DATAFLOW_INTERRUPT request (3 single-byte parameters). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__set_dataflow_interrupt_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_DATAFLOW_INTERRUPT, 3);
+
+ /* Interrupt_type */
+ request->parameters.set_dataflow_interrupt_request.interrupt_type_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_type));
+ memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_type),
+ &(interrupt_type),
+ sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_type)); /* byte-sized copy, equivalent to direct assignment */
+
+ /* Interrupt_index */
+ request->parameters.set_dataflow_interrupt_request.interrupt_index_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_index));
+ memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_index),
+ &(interrupt_index),
+ sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_index));
+
+ /* Interrupt_sub_index */
+ request->parameters.set_dataflow_interrupt_request.interrupt_sub_index_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index));
+ memcpy(&(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index),
+ &(interrupt_sub_index),
+ sizeof(request->parameters.set_dataflow_interrupt_request.interrupt_sub_index));
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t connection_type, uint16_t host_port, uint32_t host_ip_address)
+{ /* Packs a D2H_EVENT_MANAGER_SET_HOST_INFO request describing where the device should send D2H events. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__d2h_event_manager_set_new_host_info_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_D2H_EVENT_MANAGER_SET_HOST_INFO, 3);
+
+ /* connection_type */
+ request->parameters.d2h_event_manager_set_new_host_info_request.connection_type_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.connection_type));
+ request->parameters.d2h_event_manager_set_new_host_info_request.connection_type = connection_type;
+
+
+ /* host_port */
+ request->parameters.d2h_event_manager_set_new_host_info_request.host_port_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.host_port));
+ request->parameters.d2h_event_manager_set_new_host_info_request.host_port = BYTE_ORDER__htons(host_port);
+
+
+ /* host_ip_address */
+ request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address));
+ request->parameters.d2h_event_manager_set_new_host_info_request.host_ip_address = BYTE_ORDER__htonl(host_ip_address);
+
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t event_priority)
+{ /* Packs a D2H_EVENT_MANAGER_SEND_EVENT_HOST_INFO request carrying a single event-priority byte. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__d2h_event_manager_send_host_info_event_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_D2H_EVENT_MANAGER_SEND_EVENT_HOST_INFO, 1);
+
+ /* event_priority */
+ request->parameters.d2h_event_manager_send_host_info_event_request.priority_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.d2h_event_manager_send_host_info_event_request.priority));
+ request->parameters.d2h_event_manager_send_host_info_event_request.priority = event_priority;
+
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_chip_temperature_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{ /* Packs a parameterless GET_CHIP_TEMPERATURE request; the helper also sets *request_size. */
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_CHIP_TEMPERATURE);
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_board_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length)
+{ /* Packs a READ_BOARD_CONFIG request (address + number of bytes to read). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__read_board_config_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_READ_BOARD_CONFIG, 2);
+
+ /* Address */
+ request->parameters.read_board_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_board_config_request.address));
+ request->parameters.read_board_config_request.address = BYTE_ORDER__htonl(address);
+
+ /* Data count */
+ request->parameters.read_board_config_request.data_count_length = BYTE_ORDER__htonl(sizeof(request->parameters.read_board_config_request.data_count));
+ request->parameters.read_board_config_request.data_count = BYTE_ORDER__htonl(data_length);
+
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_board_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
+ uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length)
+{ /* Packs a WRITE_BOARD_CONFIG request: a target address followed by data_length payload bytes. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) || (NULL == data)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__write_board_config_request_t) + data_length; /* total size includes the trailing payload */
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_WRITE_BOARD_CONFIG, 2);
+
+ /* Address */
+ request->parameters.write_board_config_request.address_length = BYTE_ORDER__htonl(sizeof(request->parameters.write_board_config_request.address));
+ request->parameters.write_board_config_request.address = BYTE_ORDER__htonl(address);
+
+ /* Data */
+ request->parameters.write_board_config_request.data_length = BYTE_ORDER__htonl(data_length);
+
+ memcpy(&(request->parameters.write_board_config_request.data), data, data_length); /* NOTE(review): data_length is not bounded here - presumably validated by the caller against the request buffer; verify */
+
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_enable_debugging_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_rma)
+{ /* Packs an ENABLE_DEBUGGING request with a single is_rma flag. NOTE(review): no NULL checks on request/request_size, unlike every sibling packer - confirm callers guarantee non-NULL. */
+ /* Header */
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_ENABLE_DEBUGGING, 1);
+
+ /* is_rma */
+ request->parameters.enable_debugging_request.is_rma_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.enable_debugging_request.is_rma));
+ request->parameters.enable_debugging_request.is_rma = is_rma;
+
+ *request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE + sizeof(CONTROL_PROTOCOL__enable_debugging_request_t);
+
+ return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_extended_device_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{ /* Packs a parameterless extended-device-information query. NOTE(review): uses the GET_DEVICE_INFORMATION opcode - presumably shared with the extended-info control; verify. */
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_DEVICE_INFORMATION);
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_health_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{ /* Packs a parameterless GET_HEALTH_INFORMATION request; the helper also sets *request_size. */
+ return control_protocol__pack_empty_request(request, request_size, sequence, HAILO_CONTROL_OPCODE_GET_HEALTH_INFORMATION);
+}
+
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t breakpoint_id,
+ CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data)
+{ /* Packs a CONFIG_CONTEXT_SWITCH_BREAKPOINT request: id, control action, and the breakpoint data struct. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size) || (NULL == breakpoint_data)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__config_context_switch_breakpoint_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CONTEXT_SWITCH_BREAKPOINT, 3);
+
+ /* breakpoint id */
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_id_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_id));
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_id = breakpoint_id;
+
+ /* breakpoint control */
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_control_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_control));
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_control = (uint8_t)breakpoint_control;
+
+ /* breakpoint data */
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_data_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_data));
+ memcpy(&(request->parameters.config_context_switch_breakpoint_request.breakpoint_data),
+ breakpoint_data,
+ sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_data)); /* fixed-size struct copy */
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t breakpoint_id)
+{ /* Packs a GET_CONTEXT_SWITCH_BREAKPOINT_STATUS request carrying the breakpoint id. */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__get_context_switch_breakpoint_status_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_CONTEXT_SWITCH_BREAKPOINT_STATUS, 1);
+
+ /* breakpoint id */
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_id_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_breakpoint_request.breakpoint_id)); /* NOTE(review): writes via the config_... union member rather than a dedicated get-status struct - presumably the layouts overlay; verify */
+ request->parameters.config_context_switch_breakpoint_request.breakpoint_id = breakpoint_id;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence)
+{ /* Packs a parameterless GET_CONTEXT_SWITCH_MAIN_HEADER request (header only, 0 parameters). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE;
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_GET_CONTEXT_SWITCH_MAIN_HEADER, 0);
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint16_t batch_index, bool enable_user_configuration)
+{ /* Packs a CONFIG_CONTEXT_SWITCH_TIMESTAMP request (batch index + enable flag). */
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ size_t local_request_size = 0;
+
+ if ((NULL == request) || (NULL == request_size)) {
+ status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+ goto exit;
+ }
+
+ /* Header */
+ local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+ sizeof(CONTROL_PROTOCOL__config_context_switch_timestamp_request_t);
+ control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CONFIG_CONTEXT_SWITCH_TIMESTAMP, 2);
+
+ /* batch index */
+ request->parameters.config_context_switch_timestamp_request.batch_index_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_timestamp_request.batch_index));
+ request->parameters.config_context_switch_timestamp_request.batch_index = BYTE_ORDER__htons(batch_index); /* 16-bit field -> htons */
+
+ /* enable_user_configuration */
+ request->parameters.config_context_switch_timestamp_request.enable_user_configuration_length =
+ BYTE_ORDER__htonl(sizeof(request->parameters.config_context_switch_timestamp_request.enable_user_configuration));
+ request->parameters.config_context_switch_timestamp_request.enable_user_configuration = enable_user_configuration;
+
+ *request_size = local_request_size;
+ status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+ return status;
+}
+
+/**
+ * Packs a RUN_BIST_TEST control request (5 parameters).
+ *
+ * @param[out] request                 Request buffer to fill (must not be NULL).
+ * @param[out] request_size            Receives the total packed request size (must not be NULL).
+ * @param[in]  sequence                Control sequence number for the request header.
+ * @param[in]  is_top_test             True to run the top-level test, false for a cluster test.
+ * @param[in]  top_bypass_bitmap       Top bypass bitmap (sent in network byte order).
+ * @param[in]  cluster_index           Index of the cluster under test.
+ * @param[in]  cluster_bypass_bitmap_0 First cluster bypass bitmap (network byte order).
+ * @param[in]  cluster_bypass_bitmap_1 Second cluster bypass bitmap (network byte order).
+ * @return HAILO_COMMON_STATUS__SUCCESS, or
+ *         HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED on a NULL argument.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_run_bist_test_request(
+    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_top_test,
+    uint32_t top_bypass_bitmap, uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1)
+{
+    size_t local_request_size = 0;
+
+    /* Unlike older functions in this file, this one uses the CHECK_NOT_NULL
+     * macro style rather than an explicit if/goto chain. */
+    CHECK_NOT_NULL(request, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+    CHECK_NOT_NULL(request_size, HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED);
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+        sizeof(CONTROL_PROTOCOL__run_bist_test_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_RUN_BIST_TEST, 5);
+
+    /* running on top */
+    request->parameters.run_bist_test_request.is_top_test_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.is_top_test));
+    request->parameters.run_bist_test_request.is_top_test = is_top_test;
+
+    /* top bypass */
+    request->parameters.run_bist_test_request.top_bypass_bitmap_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.top_bypass_bitmap));
+    request->parameters.run_bist_test_request.top_bypass_bitmap = BYTE_ORDER__htonl(top_bypass_bitmap);
+
+    /* cluster index */
+    request->parameters.run_bist_test_request.cluster_index_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_index));
+    request->parameters.run_bist_test_request.cluster_index = cluster_index;
+
+    /* cluster bypass 0 */
+    request->parameters.run_bist_test_request.cluster_bypass_bitmap_0_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_bypass_bitmap_0));
+    request->parameters.run_bist_test_request.cluster_bypass_bitmap_0 = BYTE_ORDER__htonl(cluster_bypass_bitmap_0);
+
+    /* cluster bypass 1 */
+    request->parameters.run_bist_test_request.cluster_bypass_bitmap_1_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.run_bist_test_request.cluster_bypass_bitmap_1));
+    request->parameters.run_bist_test_request.cluster_bypass_bitmap_1 = BYTE_ORDER__htonl(cluster_bypass_bitmap_1);
+
+    *request_size = local_request_size;
+
+    return HAILO_COMMON_STATUS__SUCCESS;
+}
+
+/**
+ * Packs a SET_SLEEP_STATE control request.
+ *
+ * @param[out] request      Request buffer to fill (must not be NULL).
+ * @param[out] request_size Receives the total packed request size (must not be NULL).
+ * @param[in]  sequence     Control sequence number for the request header.
+ * @param[in]  sleep_state  Sleep state value to set on the device.
+ * @return HAILO_COMMON_STATUS__SUCCESS, or
+ *         HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED on a NULL argument.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_sleep_state_request(
+    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+    uint8_t sleep_state)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    if ((NULL == request) || (NULL == request_size)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+        sizeof(CONTROL_PROTOCOL__set_sleep_state_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_SET_SLEEP_STATE, 1);
+
+    /* sleep_state (single byte, no byte swapping needed) */
+    request->parameters.set_sleep_state_request.sleep_state_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.set_sleep_state_request.sleep_state));
+    request->parameters.set_sleep_state_request.sleep_state = sleep_state;
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+/**
+ * Packs a CHANGE_HW_INFER_STATUS control request.
+ *
+ * @param[out] request             Request buffer to fill (must not be NULL).
+ * @param[out] request_size        Receives the total packed request size (must not be NULL).
+ * @param[in]  sequence            Control sequence number for the request header.
+ * @param[in]  hw_infer_state      New HW inference state to request.
+ * @param[in]  network_group_index Network group index (carried in the protocol's
+ *                                 application_index field).
+ * @param[in]  dynamic_batch_size  Dynamic batch size for the inference.
+ * @param[in]  channels_info       Channel info copied into the request (must not be NULL).
+ * @return HAILO_COMMON_STATUS__SUCCESS, or
+ *         HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED on a NULL argument.
+ */
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_hw_infer_status_request(
+    CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+    uint8_t hw_infer_state, uint8_t network_group_index, uint16_t dynamic_batch_size,
+    CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    size_t local_request_size = 0;
+
+    /* channels_info is dereferenced by the memcpy below, so it must be validated
+     * together with the output pointers (previously it was not checked). */
+    if ((NULL == request) || (NULL == request_size) || (NULL == channels_info)) {
+        status = HAILO_STATUS__CONTROL_PROTOCOL__NULL_ARGUMENT_PASSED;
+        goto exit;
+    }
+
+    /* Header */
+    local_request_size = CONTROL_PROTOCOL__REQUEST_BASE_SIZE +
+        sizeof(CONTROL_PROTOCOL__change_hw_infer_status_request_t);
+    control_protocol__pack_request_header(request, sequence, HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS,
+        CHANGE_HW_INFER_REQUEST_PARAMETER_COUNT);
+
+    /* hw_infer_state */
+    request->parameters.change_hw_infer_status_request.hw_infer_state_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.change_hw_infer_status_request.hw_infer_state));
+    request->parameters.change_hw_infer_status_request.hw_infer_state = hw_infer_state;
+
+    /* network_group_index */
+    request->parameters.change_hw_infer_status_request.application_index_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.change_hw_infer_status_request.application_index));
+    request->parameters.change_hw_infer_status_request.application_index = network_group_index;
+
+    /* dynamic_batch_size
+     * NOTE(review): this uint16_t is stored without BYTE_ORDER__htons(), unlike
+     * batch_index in pack_config_context_switch_timestamp_request -- confirm the
+     * firmware expects this field in host byte order before changing it. */
+    request->parameters.change_hw_infer_status_request.dynamic_batch_size_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.change_hw_infer_status_request.dynamic_batch_size));
+    request->parameters.change_hw_infer_status_request.dynamic_batch_size = dynamic_batch_size;
+
+    /* channels_info -- copy the caller's struct into the request payload */
+    request->parameters.change_hw_infer_status_request.channels_info_length =
+        BYTE_ORDER__htonl(sizeof(request->parameters.change_hw_infer_status_request.channels_info));
+    memcpy(&(request->parameters.change_hw_infer_status_request.channels_info),
+        channels_info,
+        sizeof(request->parameters.change_hw_infer_status_request.channels_info));
+
+    *request_size = local_request_size;
+    status = HAILO_COMMON_STATUS__SUCCESS;
+exit:
+    return status;
+}
+
+#endif /* FIRMWARE_ARCH */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file control_protocol.hpp
+ * @brief Contains defines and declarations related to the control protocol
+ **/
+
+#ifndef _CONTROL_PROTOCOL_HPP_
+#define _CONTROL_PROTOCOL_HPP_
+
+#include "control_protocol.h"
+#include "firmware_status.h"
+#include "hailo/hailort.h"
+#include <stdint.h>
+
+/* Clock rates in Hz for Hailo-8 device variants (400 MHz for Hailo-8,
+ * 200 MHz for Hailo-8R); presumably used with the set_clock_freq control
+ * declared below -- confirm at the call sites. */
+typedef enum {
+    HAILO8_CLOCK_RATE = 400 * 1000 * 1000,
+    HAILO8R_CLOCK_RATE = 200 * 1000 * 1000
+} CONTROL_PROTOCOL__HAILO8_CLOCK_RATE_t;
+
+/* Host-side aggregate of all the parameters needed to pack a config_stream
+ * control request; consumed by the pack_config_stream_{udp,mipi,pcie}_{input,output}
+ * functions declared below. */
+typedef struct {
+    uint8_t stream_index;
+    uint8_t is_input;
+    uint32_t communication_type;
+    uint8_t skip_nn_stream_config;
+    uint8_t power_mode; // CONTROL_PROTOCOL__power_mode_t
+    CONTROL_PROTOCOL__nn_stream_config_t nn_stream_config;
+    CONTROL_PROTOCOL__communication_config_prams_t communication_params;
+} CONTROL_PROTOCOL__config_stream_params_t;
+
+static_assert(sizeof(CONTROL_PROTOCOL__context_switch_context_index_t) <= UINT8_MAX,
+ "CONTROL_PROTOCOL__context_switch_context_index_t must fit in uint8_t");
+
+/* End of context switch structs */
+
+const char *CONTROL_PROTOCOL__get_textual_opcode(CONTROL_PROTOCOL__OPCODE_t opcode);
+
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__parse_response(uint8_t *message,
+ uint32_t message_size,
+ CONTROL_PROTOCOL__response_header_t **header,
+ CONTROL_PROTOCOL__payload_t **payload,
+ CONTROL_PROTOCOL__status_t *fw_status);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__get_sequence_from_response_buffer(uint8_t *response_buffer,
+ size_t response_buffer_size, uint32_t *sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_core_identify_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_memory_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_fw_logger_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, hailo_fw_logger_level_t level, uint8_t interface_mask);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_open_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_udp_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_mipi_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_input_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_stream_pcie_output_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_stream_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_close_stream_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t dataflow_manager_id, uint8_t is_input);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__reset_type_t reset_type);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, CONTROL_PROTOCOL__dvm_options_t dvm, CONTROL_PROTOCOL__power_measurement_types_t measurement_type);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t index, bool should_clear);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t delay_milliseconds, CONTROL_PROTOCOL__averaging_factor_t averaging_factor , CONTROL_PROTOCOL__sampling_period_t sampling_period);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_stop_power_measurement_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_start_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_finish_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t offset, const uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_validate_firmware_update_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, MD5_SUM_t *expected_md5, uint32_t firmware_size);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_examine_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_user_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_erase_user_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_phy_operation_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__phy_operation_t operation_type);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_core_top_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, CONTROL_PROTOCOL__config_core_top_type_t config_type, CONTROL_PROTOCOL__config_core_top_params_t *params);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_write_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
+ uint32_t sequence, uint32_t offset, uint8_t endianness,
+ uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, const uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_i2c_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size,
+ uint32_t sequence, uint32_t offset, uint8_t endianness,
+ uint16_t slave_address, uint8_t register_address_size, uint8_t bus_index, uint32_t data_length, bool should_hold_bus);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_read_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_latency_measurement_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t latency_measurement_en, uint32_t inbound_start_buffer_number, uint32_t outbound_stop_buffer_number, uint32_t inbound_stream_index, uint32_t outbound_stream_index);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_store_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t is_first, uint32_t section_index,
+ uint32_t start_offset, uint32_t reset_data_size, uint32_t sensor_type, uint32_t total_data_size, uint8_t *data, uint32_t data_length,
+ uint16_t config_height, uint16_t config_width, uint16_t config_fps, uint32_t config_name_length, uint8_t *config_name);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint32_t section_index, uint32_t offset, uint32_t data_length);
+hailo_status CONTROL_PROTOCOL__pack_sensor_set_i2c_bus_index_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t sensor_type, uint32_t bus_index);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_load_and_start_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_reset_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t section_index);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_set_generic_i2c_slave_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint16_t slave_address,
+ uint8_t register_address_size, uint8_t bus_index, uint8_t should_hold_bus, uint8_t endianness);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_sensor_get_sections_info_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_network_group_header_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ const CONTROL_PROTOCOL__application_header_t *network_group_header);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_set_context_info_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ const CONTROL_PROTOCOL__context_switch_context_info_single_control_t *context_info);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_set_measuremment_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t measurement_enable);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_idle_time_get_measuremment_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_download_context_action_list_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size, uint32_t sequence, uint32_t network_group_id,
+ CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, uint16_t action_list_offset);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
+ uint16_t dynamic_batch_size, bool keep_nn_config_during_reset);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_enable(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t cpu_id,
+ bool should_enable);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_config(
+ CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t cpu_id,
+ uint32_t wd_cycles,
+ CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_mode);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_context_switch_clear_configured_apps_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_previous_system_state(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t cpu_id);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_dataflow_interrupt_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t interrupt_type, uint8_t interrupt_index, uint8_t interrupt_sub_index);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_set_host_info_request( CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t connection_type, uint16_t host_port, uint32_t host_ip_address);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_d2h_event_manager_send_host_info_event_request( CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t event_priority);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_chip_temperature_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_read_board_config(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_write_board_config_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint32_t address, const uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_enable_debugging_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_rma);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_extended_device_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_breakpoint_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t breakpoint_id,
+ CONTROL_PROTOCOL__context_switch_breakpoint_control_t breakpoint_control,
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t *breakpoint_data);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_breakpoint_status_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint8_t breakpoint_id);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_context_switch_main_header_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__write_second_stage_to_internal_memory_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint32_t offset, uint8_t *data, uint32_t data_length);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__copy_second_stage_to_flash_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ MD5_SUM_t *expected_md5, uint32_t second_stage_size);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_pause_frames_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size,
+ uint32_t sequence,
+ uint8_t rx_pause_frames_enable);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_config_context_switch_timestamp_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint16_t batch_index, bool enable_user_configuration);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_run_bist_test_request(
+ CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool is_top_test,
+ uint32_t top_bypass_bitmap, uint8_t cluster_index, uint32_t cluster_bypass_bitmap_0, uint32_t cluster_bypass_bitmap_1);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_clock_freq_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
+ uint32_t clock_freq);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_health_information_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool should_activate);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_throttling_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, bool should_activate);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_overcurrent_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_get_hw_consts_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_set_sleep_state_request(CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, uint8_t sleep_state);
+HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_hw_infer_status_request(CONTROL_PROTOCOL__request_t *request,
+ size_t *request_size, uint32_t sequence, uint8_t hw_infer_state, uint8_t network_group_index,
+ uint16_t dynamic_batch_size, CONTROL_PROTOCOL__hw_infer_channels_info_t *channels_info);
+
+#endif /* _CONTROL_PROTOCOL_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file d2h_event_queue.hpp
+ * @brief A thread-safe queue of device-to-host (D2H) event messages, extending
+ *        SafeQueue with the ability to discard all pending events.
+ **/
+
+#ifndef HAILO_D2H_EVENT_QUEUE_HPP_
+#define HAILO_D2H_EVENT_QUEUE_HPP_
+
+#include "utils/thread_safe_queue.hpp"
+
+#include "d2h_events.h"
+
+
+namespace hailort
+{
+
+// Thread-safe queue of device-to-host event messages, extending SafeQueue
+// with the ability to drop all pending events at once.
+class D2hEventQueue : public SafeQueue<D2H_EVENT_MESSAGE_t> {
+public:
+    // Discards all queued events. Relies on SafeQueue exposing m_mutex and
+    // m_queue to subclasses; swapping in a fresh std::queue releases the old
+    // elements while the lock is held.
+    void clear() {
+        std::unique_lock<std::mutex> lock(m_mutex);
+        m_queue = std::queue<D2H_EVENT_MESSAGE_t>();
+    }
+};
+
+} /* namespace hailort */
+
+#endif // HAILO_D2H_EVENT_QUEUE_HPP_
--- /dev/null
+/*
+ * =============================================================================
+ *
+ * HAILO
+ *
+ * Property of HAILO Tech
+ * For Unrestricted Internal Use Only
+ * Unauthorized reproduction and/or distribution is strictly prohibited.
+ * This product is protected under copyright law and trade secret law
+ * Created 2018, (C) Copyright 2018 Hailo Tech . All rights reserved.
+ * as an unpublished work.
+ */
+/**
+* Filename: d2h_events_parser.c
+*
+* Description: Implements parsing device to host notifications.
+*
+*=============================================================================*/
+
+#include <stdint.h>
+#include <string.h>
+#include "common/utils.hpp"
+#include "d2h_events.h"
+#include "byte_order.h"
+#include "common/logger_macros.hpp"
+
+using namespace hailort;
+
+/* Function prototype for control operations */
+typedef HAILO_COMMON_STATUS_t (*firmware_notifications_parser_t) (D2H_EVENT_MESSAGE_t *d2h_notification_message);
+
+/**********************************************************************
+ * Private Declarations
+ **********************************************************************/
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_rx_error(D2H_EVENT_MESSAGE_t *d2h_notification_message) ;
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_host_info_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_temperature_alarm_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_closed_streams_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_clock_changed_event_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+
+/**********************************************************************
+ * Globals
+ **********************************************************************/
+/* Dispatch table of d2h notification parsers, sized by D2H_EVENT_ID_COUNT.
+ * NOTE: the entry order must match the d2h event id enumeration, since the
+ * table is presumably indexed by the event id from the message header --
+ * verify against d2h_events.h when adding entries. */
+firmware_notifications_parser_t g_firmware_notifications_parser[D2H_EVENT_ID_COUNT] = {
+    D2H_EVENTS__parse_rx_error,
+    D2H_EVENTS__parse_host_info_notification,
+    D2H_EVENTS__parse_health_monitor_temperature_alarm_notification,
+    D2H_EVENTS__parse_health_monitor_closed_streams_notification,
+    D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification,
+    D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification,
+    D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification,
+    D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification,
+    D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification,
+    D2H_EVENTS__parse_context_switch_breakpoint_reached,
+    D2H_EVENTS__parse_health_monitor_clock_changed_event_notification
+};
+/**********************************************************************
+ * Internal Functions
+ **********************************************************************/
+/* Validates and logs an RX_ERROR d2h notification: checks the header's
+ * parameter count and payload length against the rx_error_event parameters
+ * struct, then logs the module id, error code and queue number.
+ * Returns HAILO_COMMON_STATUS__SUCCESS, or an INCORRECT_PARAMETER_COUNT /
+ * INCORRECT_PARAMETER_LENGTH status if the message is malformed. */
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_rx_error(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+    if (D2H_EVENT_RX_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+        LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+        status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+        goto l_exit;
+    }
+
+    if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.rx_error_event)) {
+        LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
+        status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+        goto l_exit;
+    }
+
+    LOGGER__INFO("Got Rx Error {} Event From module_id {} with error {}, queue {}",((D2H_EVENT_PRIORITY_CRITICAL == d2h_notification_message->header.priority) ?"Critical":"Info"),
+        d2h_notification_message->header.module_id, d2h_notification_message->message_parameters.rx_error_event.error, d2h_notification_message->message_parameters.rx_error_event.queue_number);
+
+    status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+    return status;
+}
+
+/* Validates and logs a HOST_INFO d2h notification: checks the header's
+ * parameter count and payload length against the host_info_event parameters
+ * struct, then logs the module id and connection type (UDP vs PCIe).
+ * Returns HAILO_COMMON_STATUS__SUCCESS, or an INCORRECT_PARAMETER_COUNT /
+ * INCORRECT_PARAMETER_LENGTH status if the message is malformed. */
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_host_info_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+    if (D2H_EVENT_HOST_INFO_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+        LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+        status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+        goto l_exit;
+    }
+
+    if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.host_info_event)) {
+        LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
+        status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+        goto l_exit;
+    }
+
+    LOGGER__INFO("Got host config {} Event From module_id {} with connection type {}",((D2H_EVENT_PRIORITY_CRITICAL == d2h_notification_message->header.priority) ?"Critical":"Info"),
+        d2h_notification_message->header.module_id, ((D2H_EVENT_COMMUNICATION_TYPE_UDP == d2h_notification_message->message_parameters.host_info_event.connection_type) ?"UDP":"PCIe"));
+
+    status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+    return status;
+}
+
+/* Validates and logs a health-monitor TEMPERATURE_ALARM d2h notification.
+ * Logs at a severity matching the reported temperature zone (green -> info,
+ * orange -> warning, red -> critical); an unrecognized zone is rejected with
+ * HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT.
+ * NOTE(review): unlike the other parsers in this file, payload_length is not
+ * validated here before the event parameters are read -- confirm whether that
+ * check was intentionally omitted. */
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_temperature_alarm_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+    if (D2H_EVENT_HEALTH_MONITOR_TEMPERATURE_ALARM_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+        LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+        status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+        goto l_exit;
+    }
+
+    switch (d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.temperature_zone) {
+    case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__GREEN:
+        LOGGER__INFO("Got health monitor notification - temperature reached green zone. sensor id={}, TS00={}c, TS01={}c",
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
+        break;
+
+    case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__ORANGE:
+        LOGGER__WARNING("Got health monitor notification - temperature reached orange zone. sensor id={}, TS00={}c, TS01={}c",
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
+        break;
+
+    case HAILO_TEMPERATURE_PROTECTION_TEMPERATURE_ZONE__RED:
+        LOGGER__CRITICAL("Got health monitor notification - temperature reached red zone. sensor id={}, TS00={}c, TS01={}c",
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.alarm_ts_id,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts0_temperature,
+            d2h_notification_message->message_parameters.health_monitor_temperature_alarm_event.ts1_temperature);
+        break;
+
+    default:
+        LOGGER__ERROR("Got invalid health monitor notification - temperature zone could not be parsed.");
+        status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
+        goto l_exit;
+    }
+
+    status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+    return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_clock_changed_event_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_CLOCK_CHANGED_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+ LOGGER__WARNING("Got health monitor notification - System's clock has been changed from {} to {}",
+ d2h_notification_message->message_parameters.health_monitor_clock_changed_event.previous_clock,
+ d2h_notification_message->message_parameters.health_monitor_clock_changed_event.current_clock);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_closed_streams_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_CLOSED_STREAMS_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h notification invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+
+ if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.health_monitor_closed_streams_event)) {
+ LOGGER__ERROR("d2h notification invalid payload_length: {}", d2h_notification_message->header.payload_length);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+ goto l_exit;
+ }
+
+ LOGGER__CRITICAL("Got health monitor closed streams notification. temperature: TS00={} c, TS01={} c, inputs bitfield:{:x}, outputs bitfield:{:x}",
+ d2h_notification_message->message_parameters.health_monitor_closed_streams_event.ts0_temperature,
+ d2h_notification_message->message_parameters.health_monitor_closed_streams_event.ts1_temperature,
+ d2h_notification_message->message_parameters.health_monitor_closed_streams_event.closed_input_streams,
+ d2h_notification_message->message_parameters.health_monitor_closed_streams_event.closed_output_streams);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_overcurrent_alert_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_OVERCURRENT_ALERT_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+
+ if(d2h_notification_message->header.payload_length != sizeof(d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event)) {
+ LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+ goto l_exit;
+ }
+
+ if (d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.is_last_overcurrent_violation_reached) {
+ LOGGER__WARNING("Got health monitor notification - last overcurrent violation allow alert state. The exceeded alert threshold is {} mA",
+ d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
+ } else {
+ switch (d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.overcurrent_zone) {
+ case HAILO_OVERCURRENT_PROTECTION_OVERCURRENT_ZONE__GREEN:
+ LOGGER__INFO("Got health monitor notification - overcurrent reached green zone. clk frequency decrease process was stopped. The exceeded alert threshold is {} mA",
+ d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
+ break;
+ case HAILO_OVERCURRENT_PROTECTION_OVERCURRENT_ZONE__RED:
+ LOGGER__CRITICAL("Got health monitor notification - overcurrent reached red zone. clk frequency decrease process was started. The exceeded alert threshold is {} mA",
+ d2h_notification_message->message_parameters.health_monitor_overcurrent_alert_event.exceeded_alert_threshold);
+ break;
+ default:
+ LOGGER__ERROR("Got invalid health monitor notification - overcurrent alert state could not be parsed.");
+ status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
+ goto l_exit;
+ }
+ }
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_nonfatal_notification(
+ D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_LCU_ECC_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h event lcu ecc uncorrectable error invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+
+ if(sizeof(d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event) != d2h_notification_message->header.payload_length) {
+ LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+ goto l_exit;
+ }
+
+ LOGGER__WARNING("Got health monitor LCU ECC correctable error event. cluster_bitmap={}",
+ d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event.cluster_bitmap);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_lcu_ecc_fatal_notification(
+ D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_LCU_ECC_ERROR_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h event invalid lcu ecc uncorrectable error parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+
+ if(sizeof(d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event) != d2h_notification_message->header.payload_length) {
+ LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+ goto l_exit;
+ }
+
+ LOGGER__CRITICAL("Got health monitor LCU ECC uncorrectable error event. cluster_bitmap={}",
+ d2h_notification_message->message_parameters.health_monitor_lcu_ecc_error_event.cluster_bitmap);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_error_notification(
+ D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ CHECK(D2H_EVENT_HEALTH_MONITOR_CPU_ECC_EVENT_PARAMETER_COUNT == d2h_notification_message->header.parameter_count,
+ HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT,
+ "d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+
+ CHECK(sizeof(d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event) == d2h_notification_message->header.payload_length,
+ HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH,
+ "d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+
+ LOGGER__ERROR("Got health monitor CPU ECC error event. memory_bitmap={}",
+ d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event.memory_bitmap);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification(
+ D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_HEALTH_MONITOR_CPU_ECC_EVENT_PARAMETER_COUNT != d2h_notification_message->header.parameter_count) {
+ LOGGER__ERROR("d2h event invalid cpu ecc uncorrectable error parameter count: {}", d2h_notification_message->header.parameter_count);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT;
+ goto l_exit;
+ }
+
+ if(sizeof(d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event) != d2h_notification_message->header.payload_length) {
+ LOGGER__ERROR("d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+ status = HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH;
+ goto l_exit;
+ }
+
+ LOGGER__CRITICAL("Got health monitor CPU ECC fatal event. memory_bitmap={}",
+ d2h_notification_message->message_parameters.health_monitor_cpu_ecc_event.memory_bitmap);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+l_exit:
+ return status;
+}
+
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ CHECK(D2H_EVENT_CONTEXT_SWITCH_BREAKPOINT_REACHED_EVENT_PARAMETER_COUNT == d2h_notification_message->header.parameter_count,
+ HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT,
+ "d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+
+ CHECK(d2h_notification_message->header.payload_length ==
+ sizeof(d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event),
+ HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH,
+ "d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+
+ LOGGER__INFO("Got Context switch breakpoint with net_group index {}, batch index {}, context index {}, action index {}",
+ d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.application_index,
+ d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.batch_index,
+ d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.context_index,
+ d2h_notification_message->message_parameters.context_switch_breakpoint_reached_event.action_index);
+
+ status = HAILO_COMMON_STATUS__SUCCESS;
+
+ return status;
+}
+
+/**********************************************************************
+ * Public Functions
+ **********************************************************************/
+HAILO_COMMON_STATUS_t D2H_EVENTS__parse_event(D2H_EVENT_MESSAGE_t *d2h_notification_message){
+
+ HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+ if (D2H_EVENT_ID_COUNT < d2h_notification_message->header.event_id){
+ LOGGER__ERROR("d2h notification invalid notification_id: {}", d2h_notification_message->header.event_id);
+ status = HAILO_STATUS__D2H_EVENTS__INVALID_ARGUMENT;
+ goto l_exit;
+ }
+ status = g_firmware_notifications_parser[d2h_notification_message->header.event_id](d2h_notification_message);
+
+l_exit:
+ return status;
+}
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
/**
 * @file device.cpp
 * @brief Implementation of the Device base class: device discovery (scan), factory
 *        creation for PCIe / Ethernet / integrated devices, and thin wrappers that
 *        forward control operations (power measurement, watchdog, memory access,
 *        context-switch breakpoints, etc.) to the Control layer.
 **/
+
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+
+#include "common/utils.hpp"
+
+#include "device_common/control.hpp"
+#include "vdma/pcie/pcie_device.hpp"
+#include "vdma/integrated/integrated_device.hpp"
+#include "eth/eth_device.hpp"
+
+#include "byte_order.h"
+#include "firmware_header_utils.h"
+#include "control_protocol.h"
+#include <memory>
+#ifndef _MSC_VER
+#include <sys/utsname.h>
+#endif
+
+
+namespace hailort
+{
+
+#define WRITE_CHUNK_SIZE (1024)
+#define DEVICE_WORD_SIZE (4)
+
+Device::Device(Type type) :
+ m_type(type),
+ m_control_sequence(0),
+ m_is_control_version_supported(false),
+ m_device_architecture(HAILO_ARCH_MAX_ENUM)
+{
+#ifndef _MSC_VER
+ struct utsname uname_data;
+ if (-1 != uname(&uname_data)) {
+ LOGGER__INFO("OS Version: {} {} {} {}", uname_data.sysname, uname_data.release,
+ uname_data.version,uname_data.machine);
+ } else {
+ LOGGER__ERROR("uname failed (errno = {})", errno);
+ }
+#endif
+}
+
+Expected<std::vector<std::string>> Device::scan()
+{
+ // TODO: HRT-7530 support both CORE and PCIE
+ if (IntegratedDevice::is_loaded()) {
+ return std::vector<std::string>{IntegratedDevice::DEVICE_ID};
+ }
+ else {
+ auto pcie_device_infos = PcieDevice::scan();
+ CHECK_EXPECTED(pcie_device_infos);
+
+ std::vector<std::string> results;
+ results.reserve(pcie_device_infos->size());
+
+ for (const auto pcie_device_info : pcie_device_infos.release()) {
+ auto device_id = pcie_device_info_to_string(pcie_device_info);
+ CHECK_EXPECTED(device_id);
+ results.emplace_back(device_id.release());
+ }
+
+ return results;
+ }
+}
+
+Expected<std::vector<hailo_pcie_device_info_t>> Device::scan_pcie()
+{
+ return PcieDevice::scan();
+}
+
+Expected<std::vector<hailo_eth_device_info_t>> Device::scan_eth(const std::string &interface_name,
+ std::chrono::milliseconds timeout)
+{
+ return EthernetDevice::scan(interface_name, timeout);
+}
+
+Expected<std::vector<hailo_eth_device_info_t>> Device::scan_eth_by_host_address(const std::string &host_address,
+ std::chrono::milliseconds timeout)
+{
+ return EthernetDevice::scan_by_host_address(host_address, timeout);
+}
+
+Expected<std::unique_ptr<Device>> Device::create()
+{
+ auto device_ids = scan();
+ CHECK_EXPECTED(device_ids, "Failed scan devices");
+ CHECK_AS_EXPECTED(device_ids->size() == 1, HAILO_INVALID_OPERATION,
+ "Expected only 1 device on the system (found {}). Pass device_id to create a specific device", device_ids->size());
+
+ return Device::create(device_ids->at(0));
+}
+
+Expected<std::unique_ptr<Device>> Device::create(const std::string &device_id)
+{
+ const bool DONT_LOG_ON_FAILURE = false;
+ if (IntegratedDevice::DEVICE_ID == device_id) {
+ return create_core();
+ }
+ else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
+ return create_pcie(pcie_info.release());
+ }
+ else if (auto eth_info = EthernetDevice::parse_eth_device_info(device_id, DONT_LOG_ON_FAILURE)) {
+ return create_eth(eth_info.release());
+ }
+ else {
+ LOGGER__ERROR("Invalid device id {}", device_id);
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+ }
+}
+
+Expected<std::unique_ptr<Device>> Device::create_pcie()
+{
+ auto pcie_device = PcieDevice::create();
+ CHECK_EXPECTED(pcie_device);
+ // Upcasting to Device unique_ptr (from PcieDevice unique_ptr)
+ auto device = std::unique_ptr<Device>(pcie_device.release());
+ return device;
+}
+
+Expected<std::unique_ptr<Device>> Device::create_pcie(const hailo_pcie_device_info_t &device_info)
+{
+ auto pcie_device = PcieDevice::create(device_info);
+ CHECK_EXPECTED(pcie_device);
+ // Upcasting to Device unique_ptr (from PcieDevice unique_ptr)
+ auto device = std::unique_ptr<Device>(pcie_device.release());
+ return device;
+}
+
+Expected<std::unique_ptr<Device>> Device::create_eth(const hailo_eth_device_info_t &device_info)
+{
+ auto eth_device = EthernetDevice::create(device_info);
+ CHECK_EXPECTED(eth_device);
+ // Upcasting to Device unique_ptr (from EthernetDevice unique_ptr)
+ auto device = std::unique_ptr<Device>(eth_device.release());
+ return device;
+}
+
+Expected<std::unique_ptr<Device>> Device::create_eth(const std::string &ip_addr)
+{
+ auto eth_device = EthernetDevice::create(ip_addr);
+ CHECK_EXPECTED(eth_device);
+ // Upcasting to Device unique_ptr (from EthernetDevice unique_ptr)
+ auto device = std::unique_ptr<Device>(eth_device.release());
+ return device;
+}
+
+Expected<hailo_pcie_device_info_t> Device::parse_pcie_device_info(const std::string &device_info_str)
+{
+ const bool LOG_ON_FAILURE = true;
+ return PcieDevice::parse_pcie_device_info(device_info_str, LOG_ON_FAILURE);
+}
+
+Expected<std::string> Device::pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info)
+{
+ return PcieDevice::pcie_device_info_to_string(device_info);
+}
+
+Expected<Device::Type> Device::get_device_type(const std::string &device_id)
+{
+ const bool DONT_LOG_ON_FAILURE = false;
+ if (IntegratedDevice::DEVICE_ID == device_id) {
+ return Type::INTEGRATED;
+ }
+ else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
+ return Type::PCIE;
+ }
+ else if (auto eth_info = EthernetDevice::parse_eth_device_info(device_id, DONT_LOG_ON_FAILURE)) {
+ return Type::ETH;
+ }
+ else {
+ LOGGER__ERROR("Invalid device id {}", device_id);
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+ }
+}
+
// Returns the sequence number of the next control message (advanced by fw_interact).
uint32_t Device::get_control_sequence()
{
    return m_control_sequence;
}

// True when host and device firmware versions were found compatible by update_fw_state().
bool Device::is_control_version_supported()
{
    return m_is_control_version_supported;
}

// Returns the transport type (PCIE / ETH / INTEGRATED) this device was created with.
Device::Type Device::get_type() const
{
    return m_type;
}
+
+Expected<hailo_stream_interface_t> Device::get_default_streams_interface() const
+{
+ switch(m_type) {
+ case Type::PCIE:
+ return HAILO_STREAM_INTERFACE_PCIE;
+ case Type::INTEGRATED:
+ return HAILO_STREAM_INTERFACE_INTEGRATED;
+ case Type::ETH:
+ return HAILO_STREAM_INTERFACE_ETH;
+ default:
+ LOGGER__ERROR("Failed to get default streams interface.");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+}
+
+hailo_status Device::set_fw_logger(hailo_fw_logger_level_t level, uint32_t interface_mask)
+{
+ return Control::set_fw_logger(*this, level, interface_mask);
+}
+
+hailo_status Device::set_throttling_state(bool should_activate)
+{
+ return Control::set_throttling_state(*this, should_activate);
+}
+
+Expected<bool> Device::get_throttling_state()
+{
+ return Control::get_throttling_state(*this);
+}
+
+hailo_status Device::write_memory(uint32_t address, const MemoryView &data)
+{
+ return Control::write_memory(*this, address, data.data(), static_cast<uint32_t>(data.size()));
+}
+
+hailo_status Device::read_memory(uint32_t address, MemoryView &data)
+{
+ return Control::read_memory(*this, address, data.data(), static_cast<uint32_t>(data.size()));
+}
+
+hailo_status Device::wd_enable(hailo_cpu_id_t cpu_id)
+{
+ return static_cast<hailo_status>(Control::wd_enable(*this, static_cast<uint8_t>(cpu_id), true));
+}
+
+hailo_status Device::wd_disable(hailo_cpu_id_t cpu_id)
+{
+ return Control::wd_enable(*this, static_cast<uint8_t>(cpu_id), false);
+}
+
+hailo_status Device::wd_config(hailo_cpu_id_t cpu_id, uint32_t wd_cycles, hailo_watchdog_mode_t wd_mode)
+{
+ CONTROL_PROTOCOL__WATCHDOG_MODE_t wd_type = CONTROL_PROTOCOL__WATCHDOG_NUM_MODES; // set invalid value
+ switch(wd_mode) {
+ case HAILO_WATCHDOG_MODE_HW_SW:
+ wd_type = CONTROL_PROTOCOL__WATCHDOG_MODE_HW_SW;
+ break;
+ case HAILO_WATCHDOG_MODE_HW_ONLY:
+ wd_type = CONTROL_PROTOCOL__WATCHDOG_MODE_HW_ONLY;
+ break;
+ default:
+ LOGGER__ERROR("Invalid wd_mode");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return Control::wd_config(*this, static_cast<uint8_t>(cpu_id), wd_cycles, wd_type);
+}
+
+Expected<uint32_t> Device::previous_system_state(hailo_cpu_id_t cpu_id)
+{
+ CONTROL_PROTOCOL__system_state_t res = {};
+ auto status = Control::previous_system_state(*this, static_cast<uint8_t>(cpu_id), &res);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ return res;
+}
+
+hailo_status Device::set_pause_frames(bool rx_pause_frames_enable)
+{
+ return Control::set_pause_frames(*this, rx_pause_frames_enable);
+}
+
+hailo_status Device::i2c_read(const hailo_i2c_slave_config_t &slave_config, uint32_t register_address, MemoryView &data)
+{
+ return Control::i2c_read(*this, &slave_config, register_address, data.data(), static_cast<uint32_t>(data.size()));
+}
+
+hailo_status Device::i2c_write(const hailo_i2c_slave_config_t &slave_config, uint32_t register_address, const MemoryView &data)
+{
+ return Control::i2c_write(*this, &slave_config, register_address, data.data(), static_cast<uint32_t>(data.size()));
+}
+
+Expected<float32_t> Device::power_measurement(hailo_dvm_options_t dvm, hailo_power_measurement_types_t measurement_type)
+{
+ float32_t res = 0;
+ auto status = Control::power_measurement(*this, static_cast<CONTROL_PROTOCOL__dvm_options_t>(dvm),
+ static_cast<CONTROL_PROTOCOL__power_measurement_types_t>(measurement_type), &res);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ return res;
+}
+
+hailo_status Device::start_power_measurement(hailo_averaging_factor_t averaging_factor, hailo_sampling_period_t sampling_period)
+{
+ return Control::start_power_measurement(*this, static_cast<CONTROL_PROTOCOL__averaging_factor_t>(averaging_factor),
+ static_cast<CONTROL_PROTOCOL__sampling_period_t>(sampling_period));
+}
+
+hailo_status Device::set_power_measurement(hailo_measurement_buffer_index_t buffer_index, hailo_dvm_options_t dvm, hailo_power_measurement_types_t measurement_type)
+{
+ return Control::set_power_measurement(*this, buffer_index, static_cast<CONTROL_PROTOCOL__dvm_options_t>(dvm), static_cast<CONTROL_PROTOCOL__power_measurement_types_t>(measurement_type));
+}
+
+Expected<hailo_power_measurement_data_t> Device::get_power_measurement(hailo_measurement_buffer_index_t buffer_index, bool should_clear)
+{
+ hailo_power_measurement_data_t measurement_data = {};
+ auto status = Control::get_power_measurement(*this, buffer_index, should_clear, &measurement_data);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ return measurement_data;
+}
+
+hailo_status Device::stop_power_measurement()
+{
+ return Control::stop_power_measurement(*this);
+}
+
+Expected<hailo_chip_temperature_info_t> Device::get_chip_temperature()
+{
+ hailo_chip_temperature_info_t res = {};
+ auto status = Control::get_chip_temperature(*this, &res);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ return res;
+}
+
+hailo_status Device::test_chip_memories()
+{
+ return Control::test_chip_memories(*this);
+}
+
+hailo_status Device::set_sleep_state(hailo_sleep_state_t sleep_state)
+{
+ return Control::set_sleep_state(*this, sleep_state);
+}
+
+hailo_status Device::direct_write_memory(uint32_t address, const void *buffer, uint32_t size)
+{
+ (void) address;
+ (void) buffer;
+ (void) size;
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+hailo_status Device::direct_read_memory(uint32_t address, void *buffer, uint32_t size)
+{
+ (void) address;
+ (void) buffer;
+ (void) size;
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+Expected<hailo_device_identity_t> Device::identify()
+{
+ return Control::identify(*this);
+}
+
+Expected<hailo_core_information_t> Device::core_identify()
+{
+ hailo_core_information_t res = {};
+ auto status = Control::core_identify(*this, &res);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ return res;
+}
+
+Expected<hailo_extended_device_information_t> Device::get_extended_device_information()
+{
+ return Control::get_extended_device_information(*this);
+}
+
// Note: This function needs to be called after each reset/fw_update if we want the device's
// state to remain valid after these ops (see HRT-3116)
//
// Re-identifies the device and refreshes the cached compatibility flag
// (m_is_control_version_supported) and device architecture.
hailo_status Device::update_fw_state()
{
    // Assuming FW is loaded, send identify
    auto board_info_expected = Control::identify(*this);
    CHECK_EXPECTED_AS_STATUS(board_info_expected);
    hailo_device_identity_t board_info = board_info_expected.release();

    // Host and device are treated as compatible only when major and minor versions
    // match; revision differences are tolerated.
    if ((FIRMWARE_VERSION_MAJOR == board_info.fw_version.major) &&
        (FIRMWARE_VERSION_MINOR == board_info.fw_version.minor)) {
        m_is_control_version_supported = true;
    } else {
        // A mismatch is not fatal here: the flag is consulted per-opcode in fw_interact,
        // where critical opcodes remain allowed.
        LOGGER__WARNING("Unsupported firmware operation. Host: {}.{}.{}, Device: {}.{}.{}{}",
            FIRMWARE_VERSION_MAJOR,
            FIRMWARE_VERSION_MINOR,
            FIRMWARE_VERSION_REVISION,
            board_info.fw_version.major,
            board_info.fw_version.minor,
            board_info.fw_version.revision,
            DEV_STRING_NOTE(board_info.is_release));
        m_is_control_version_supported = false;
    }
    // Cache the architecture reported by the device for later queries.
    m_device_architecture = board_info.device_architecture;

    return HAILO_SUCCESS;
}
+
// Sends one raw control request to the device firmware and receives its response.
// The control sequence counter is incremented even when the transfer fails, keeping
// host and device sequence numbers in sync.
//
// @param request_buffer  Serialized CONTROL_PROTOCOL__request_t (network byte order header).
// @param request_size    Size of the request in bytes.
// @param response_buffer Buffer receiving the raw response.
// @param response_size   In: capacity of response_buffer; out: actual response size.
hailo_status Device::fw_interact(uint8_t *request_buffer, size_t request_size,
    uint8_t *response_buffer, size_t *response_size)
{
    hailo_status status = HAILO_UNINITIALIZED;
    CONTROL_PROTOCOL__request_t *request = (CONTROL_PROTOCOL__request_t *)(request_buffer);
    uint32_t opcode = HAILO_CONTROL_OPCODE_COUNT;
    ASSERT(NULL != request_buffer);
    ASSERT(NULL != response_buffer);
    hailo_cpu_id_t cpu_id;

    // NOTE(review): opcode is read from the caller-built request and used to index the
    // g_CONTROL_PROTOCOL__* tables without a range check - assumes requests are produced
    // by the Control layer only; verify no external path reaches here.
    opcode = BYTE_ORDER__ntohl(request->header.common_header.opcode);
    /* Make sure that the version is supported or opcode is critical */
    if (!m_is_control_version_supported &&
        !g_CONTROL_PROTOCOL__is_critical[opcode]){
        LOGGER__ERROR(
            "Operation {} is not allowed when FW version in not supported. Host supported FW version is {}.{}.{}",
            BYTE_ORDER__ntohl(request->header.common_header.opcode),
            FIRMWARE_VERSION_MAJOR, FIRMWARE_VERSION_MINOR, FIRMWARE_VERSION_REVISION
        );
        return HAILO_UNSUPPORTED_FW_VERSION;
    }
    /* Get the CPU ID */
    cpu_id = (hailo_cpu_id_t)g_CONTROL_PROTOCOL__cpu_id[opcode];

    status = this->fw_interact_impl(request_buffer, request_size, response_buffer, response_size, cpu_id);

    // Always increment sequence
    this->increment_control_sequence();
    // Check this->fw_interact_impl
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
+
+hailo_status Device::set_overcurrent_state(bool should_activate)
+{
+ return Control::set_overcurrent_state(*this, should_activate);
+}
+
+Expected<bool> Device::get_overcurrent_state()
+{
+ return Control::get_overcurrent_state(*this);
+}
+
+Expected<hailo_health_info_t> Device::get_health_information()
+{
+ return Control::get_health_information(*this);
+}
+
+Expected<std::vector<uint8_t>> Device::get_number_of_dynamic_contexts_per_network_group()
+{
+ CONTROL_PROTOCOL__context_switch_main_header_t context_switch_main_header{};
+ const auto status = Control::get_context_switch_main_header(*this, &context_switch_main_header);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ std::vector<uint8_t> number_of_contexts_per_network_group;
+ for (auto network_group_index = 0; network_group_index < context_switch_main_header.application_count; network_group_index++) {
+ const uint32_t num_contexts = context_switch_main_header.application_header[network_group_index].dynamic_contexts_count;
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(num_contexts), HAILO_INTERNAL_FAILURE, "num_contexts must fit in one byte");
+ number_of_contexts_per_network_group.emplace_back(static_cast<uint8_t>(num_contexts));
+ }
+
+ return number_of_contexts_per_network_group;
+}
+
+Expected<Buffer> Device::download_context_action_list(uint32_t network_group_id, uint8_t context_type,
+ uint8_t context_index, uint32_t *base_address, uint32_t *batch_counter, uint16_t max_size)
+{
+ CHECK_ARG_NOT_NULL_AS_EXPECTED(base_address);
+ CHECK_ARG_NOT_NULL_AS_EXPECTED(batch_counter);
+
+ // Allocate room for an action list of at most max_size bytes
+ auto action_list = Buffer::create(max_size);
+ CHECK_EXPECTED(action_list);
+
+ uint32_t base_address_local = 0;
+ uint32_t batch_counter_local = 0;
+ uint16_t actual_size = 0;
+ const auto status = Control::download_context_action_list(*this, network_group_id,
+ (CONTROL_PROTOCOL__context_switch_context_type_t)context_type, context_index, action_list->size(),
+ &base_address_local, action_list->data(), &actual_size, &batch_counter_local);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ CHECK_AS_EXPECTED(actual_size <= max_size, HAILO_INTERNAL_FAILURE);
+
+ // Create a copy of the list, truncating to the needed size
+ auto final_action_list = Buffer::create(action_list->data(), actual_size);
+ CHECK_EXPECTED(action_list);
+
+ // Transfer ownership of out params
+ *base_address = base_address_local;
+ *batch_counter = batch_counter_local;
+
+ return final_action_list.release();
+}
+
+hailo_status Device::set_context_action_list_timestamp_batch(uint16_t batch_index)
+{
+ static const bool ENABLE_USER_CONFIG = true;
+ return Control::config_context_switch_timestamp(*this, batch_index, ENABLE_USER_CONFIG);
+}
+
+hailo_status Device::set_context_switch_breakpoint(uint8_t breakpoint_id, bool break_at_any_network_group_index,
+ uint8_t network_group_index, bool break_at_any_batch_index, uint16_t batch_index, bool break_at_any_context_index,
+ uint8_t context_index, bool break_at_any_action_index, uint16_t action_index)
+{
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {
+ break_at_any_network_group_index,
+ network_group_index,
+ break_at_any_batch_index,
+ batch_index,
+ break_at_any_context_index,
+ context_index,
+ break_at_any_action_index,
+ action_index};
+
+ auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_SET, &breakpoint_data);
+ CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in continue breakpoint");
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Device::continue_context_switch_breakpoint(uint8_t breakpoint_id)
+{
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {false, 0, false, 0, false, 0, false, 0};
+
+ auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_CONTINUE, &breakpoint_data);
+ CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in continue breakpoint");
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status Device::clear_context_switch_breakpoint(uint8_t breakpoint_id)
+{
+ CONTROL_PROTOCOL__context_switch_breakpoint_data_t breakpoint_data = {false, 0, false, 0, false, 0, false, 0};
+
+ auto status = Control::config_context_switch_breakpoint(*this, breakpoint_id,
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_BREAKPOINT_CONTROL_CLEAR, &breakpoint_data);
+ CHECK_SUCCESS(status, "Failed Setting context switch breakpoint in clear breakpoint");
+
+ return HAILO_SUCCESS;
+}
+
+Expected<uint8_t> Device::get_context_switch_breakpoint_status(uint8_t breakpoint_id)
+{
+ CONTROL_PROTOCOL__context_switch_debug_sys_status_t breakpoint_status =
+ CONTROL_PROTOCOL__CONTEXT_SWITCH_DEBUG_SYS_STATUS_COUNT;
+
+ auto status = Control::get_context_switch_breakpoint_status(*this, breakpoint_id,
+ &breakpoint_status);
+ CHECK_SUCCESS_AS_EXPECTED(status, "Failed getting context switch breakpoint");
+
+ return static_cast<uint8_t>(breakpoint_status);
+}
+
+// Create an integrated (on-chip) device and hand it back as a generic Device pointer.
+Expected<std::unique_ptr<Device>> Device::create_core()
+{
+    auto integrated_device = IntegratedDevice::create();
+    CHECK_EXPECTED(integrated_device);
+
+    // Upcast: move ownership from the IntegratedDevice pointer into a Device pointer.
+    return std::unique_ptr<Device>(integrated_device.release());
+}
+
+// Build default configure params for every network group in the HEF, using this
+// device's default stream interface.
+Expected<NetworkGroupsParamsMap> Device::create_configure_params(Hef &hef) const
+{
+    auto default_interface = get_default_streams_interface();
+    CHECK_EXPECTED(default_interface, "Failed to get default streams interface");
+    return hef.create_configure_params(default_interface.release());
+}
+
+// Build default configure params for a single named network group in the HEF.
+Expected<ConfigureNetworkParams> Device::create_configure_params(Hef &hef, const std::string &network_group_name) const
+{
+    auto default_interface = get_default_streams_interface();
+    CHECK_EXPECTED(default_interface, "Failed to get default streams interface");
+    return hef.create_configure_params(default_interface.release(), network_group_name);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file device_internal.cpp
+ * @brief Implementation of DeviceBase class
+ **/
+
+#include "hailo/hailort.h"
+
+#include "common/os_utils.hpp"
+
+#include "device_common/control.hpp"
+#include "device_common/device_internal.hpp"
+#include "utils/sensor_config_utils.hpp"
+
+
+namespace hailort
+{
+
+// Base-class constructor: sets up the D2H (device-to-host) notification machinery -
+// the event queue, the (not yet started) processing thread, the shared params for the
+// notification fetch thread, and the per-notification-id callbacks table.
+DeviceBase::DeviceBase(Type type) :
+    Device::Device(type),
+    m_d2h_notification_queue(),
+    m_d2h_notification_thread(),
+    m_notif_fetch_thread_params(make_shared_nothrow<NotificationThreadSharedParams>()),
+    m_d2h_callbacks{{0,0}},
+    m_callbacks_lock()
+    // TODO: Handle m_notif_fetch_thread_params null pointer
+{
+    // Warn about build configurations that can affect runtime performance.
+#ifndef NDEBUG
+    LOGGER__WARNING("libhailort is running in \"debug\" mode. Overall performance might be affected!");
+#endif
+#ifdef HAILO_EMULATOR
+    LOGGER__WARNING("libhailort is running in \"Emulator\" mode.");
+#endif
+}
+
+DeviceBase::~DeviceBase()
+{
+    // Terminate and join the D2H processing thread before the members it uses are destroyed.
+    stop_d2h_notification_thread();
+}
+
+// Configure the device from a HEF: verify the HEF matches this device's architecture,
+// then load it with the given (possibly empty) per-network-group parameters.
+Expected<ConfiguredNetworkGroupVector> DeviceBase::configure(Hef &hef,
+    const NetworkGroupsParamsMap &configure_params)
+{
+    const auto start_time = std::chrono::steady_clock::now();
+
+    const auto compat_status = check_hef_is_compatible(hef);
+    CHECK_SUCCESS_AS_EXPECTED(compat_status);
+
+    auto network_groups = add_hef(hef, configure_params);
+    CHECK_EXPECTED(network_groups);
+
+    const std::chrono::duration<double, std::milli> elapsed = std::chrono::steady_clock::now() - start_time;
+    LOGGER__INFO("Configuring HEF took {} milliseconds", elapsed.count());
+
+    return network_groups;
+}
+
+// Translate the public reset mode into the matching control-protocol reset type and
+// delegate to the concrete device's reset_impl.
+hailo_status DeviceBase::reset(hailo_reset_device_mode_t mode)
+{
+    switch (mode) {
+    case HAILO_RESET_DEVICE_MODE_CHIP:
+        return reset_impl(CONTROL_PROTOCOL__RESET_TYPE__CHIP);
+    case HAILO_RESET_DEVICE_MODE_NN_CORE:
+        return reset_impl(CONTROL_PROTOCOL__RESET_TYPE__NN_CORE);
+    case HAILO_RESET_DEVICE_MODE_SOFT:
+        return reset_impl(CONTROL_PROTOCOL__RESET_TYPE__SOFT);
+    case HAILO_RESET_DEVICE_MODE_FORCED_SOFT:
+        return reset_impl(CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT);
+    default:
+        // Unknown reset mode requested by the caller.
+        return HAILO_INVALID_ARGUMENT;
+    }
+}
+
+// Register a user callback (plus an opaque context pointer) for a given notification id.
+// Replaces any previously registered callback for that id.
+hailo_status DeviceBase::set_notification_callback(const NotificationCallback &func, hailo_notification_id_t notification_id, void *opaque)
+{
+    CHECK((0 <= notification_id) && (HAILO_NOTIFICATION_ID_COUNT > notification_id), HAILO_INVALID_ARGUMENT,
+        "Notification id value is invalid");
+    CHECK_ARG_NOT_NULL(func);
+
+    auto callback_ptr = make_shared_nothrow<NotificationCallback>(func);
+    CHECK_NOT_NULL(callback_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    {
+        // Guard the callbacks table against the dispatching thread.
+        const std::lock_guard<std::mutex> lock(m_callbacks_lock);
+        m_d2h_callbacks[notification_id].func = callback_ptr;
+        m_d2h_callbacks[notification_id].opaque = opaque;
+    }
+    return HAILO_SUCCESS;
+}
+
+// Unregister the callback (if any) associated with the given notification id.
+hailo_status DeviceBase::remove_notification_callback(hailo_notification_id_t notification_id)
+{
+    CHECK((0 <= notification_id) && (HAILO_NOTIFICATION_ID_COUNT > notification_id), HAILO_INVALID_ARGUMENT,
+        "Notification id value is invalid");
+
+    const std::lock_guard<std::mutex> lock(m_callbacks_lock);
+    auto &callback_entry = m_d2h_callbacks[notification_id];
+    callback_entry.func = nullptr;
+    callback_entry.opaque = nullptr;
+    return HAILO_SUCCESS;
+}
+
+// Start both halves of the notification pipeline: the processing thread that dispatches
+// events to callbacks, and the fetch thread that reads raw events into the queue.
+void DeviceBase::activate_notifications(const std::string &device_id)
+{
+    start_d2h_notification_thread(device_id);
+    start_notification_fetch_thread(&m_d2h_notification_queue);
+}
+
+// Stop the thread that reads raw notifications from the device. Best effort: a failure
+// to disable notifications is logged and reported, but the thread is always joined.
+hailo_status DeviceBase::stop_notification_fetch_thread()
+{
+    hailo_status status = HAILO_SUCCESS; // best effort
+
+    if (m_notif_fetch_thread_params->is_running) {
+        // Clear the running flag first so the fetch loop treats a subsequent read
+        // failure as a normal shutdown rather than an error.
+        m_notif_fetch_thread_params->is_running = false;
+        auto disable_status = this->disable_notifications();
+        if (HAILO_SUCCESS != disable_status) {
+            status = disable_status;
+            LOGGER__WARNING("Failed disabling notifications using ioctl command");
+        }
+    }
+
+    // join thread even if disable_notifications failed - so we don't have non-joined thread
+    if (m_notification_fetch_thread.joinable()) {
+        m_notification_fetch_thread.join();
+    }
+
+    return status;
+}
+
+// Launch the thread that reads raw notifications from the device into write_queue.
+// The shared params object lets stop_notification_fetch_thread() signal shutdown.
+void DeviceBase::start_notification_fetch_thread(D2hEventQueue *write_queue)
+{
+    auto &shared_params = m_notif_fetch_thread_params;
+    shared_params->write_queue = write_queue;
+    shared_params->is_running = true;
+    m_notification_fetch_thread = std::thread(&DeviceBase::notification_fetch_thread, this, shared_params);
+}
+
+// Thread body: blocks on read_notification() and forwards each raw event into the
+// processing queue until is_running is cleared or the read fails.
+void DeviceBase::notification_fetch_thread(std::shared_ptr<NotificationThreadSharedParams> params)
+{
+    OsUtils::set_current_thread_name("NOTIFY_READ");
+    while (params->is_running) {
+        auto expected_notification = this->read_notification();
+        if (HAILO_SUCCESS != expected_notification.status()) {
+            // A read failure after is_running was cleared is the expected shutdown
+            // path (see stop_notification_fetch_thread) - only log while running.
+            if (params->is_running) {
+                LOGGER__ERROR("Read notification failed with status={}", expected_notification.status());
+            }
+            break;
+        }
+        params->write_queue->push(expected_notification.release());
+    }
+}
+
+/**
+ * Deduce the firmware binary type that matches this device's architecture.
+ *
+ * @return FIRMWARE_TYPE_HAILO8 for Hailo8/Hailo8L devices, FIRMWARE_TYPE_HAILO15 for
+ *         Hailo15 devices, otherwise HAILO_INVALID_DEVICE_ARCHITECTURE.
+ */
+Expected<firmware_type_t> DeviceBase::get_fw_type()
+{
+    firmware_type_t firmware_type;
+    const auto architecture = get_architecture();
+    CHECK_EXPECTED(architecture);
+
+    if ((architecture.value() == HAILO_ARCH_HAILO8) || (architecture.value() == HAILO_ARCH_HAILO8L)) {
+        firmware_type = FIRMWARE_TYPE_HAILO8;
+    }
+    else if (architecture.value() == HAILO_ARCH_HAILO15) {
+        firmware_type = FIRMWARE_TYPE_HAILO15;
+    }
+    else {
+        // Fixed typo in the log message ("arcitecture" -> "architecture").
+        LOGGER__ERROR("Invalid device architecture. {}", architecture.value());
+        return make_unexpected(HAILO_INVALID_DEVICE_ARCHITECTURE);
+    }
+
+    return Expected<firmware_type_t>(firmware_type);
+}
+
+/**
+ * Burn a new APP+CORE firmware image onto the device.
+ *
+ * Flow: validate the binary headers and versions against the connected board, stream the
+ * image to the device in WRITE_CHUNK_SIZE chunks, let the device verify the MD5 and commit,
+ * then (optionally) reset the device and verify the versions that actually booted.
+ *
+ * @param firmware_binary Complete firmware image (APP + CORE, with headers).
+ * @param should_reset    When true, reset the device after the update and re-identify it.
+ * @return HAILO_SUCCESS on success, otherwise a failure status.
+ */
+hailo_status DeviceBase::firmware_update(const MemoryView &firmware_binary, bool should_reset)
+{
+    HAILO_COMMON_STATUS_t fw_header_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    hailo_status status = HAILO_UNINITIALIZED;
+    firmware_version_t *current_fw_version = NULL;
+    firmware_version_t new_app_fw_version = {};
+    firmware_version_t new_core_fw_version = {};
+    uint32_t offset = 0;
+    uint32_t chunk_size = 0;
+    MD5_CTX md5_ctx = {};
+    MD5_SUM_t md5_sum = {};
+    firmware_header_t *new_app_firmware_header = NULL;
+    firmware_header_t *new_core_firmware_header = NULL;
+
+    // MD5 over the whole image; the device validates the streamed data against it.
+    MD5_Init(&md5_ctx);
+    MD5_Update(&md5_ctx, firmware_binary.data(), firmware_binary.size());
+    MD5_Final(md5_sum, &md5_ctx);
+
+    const auto firmware_type = get_fw_type();
+    CHECK_EXPECTED_AS_STATUS(firmware_type);
+
+    fw_header_status = FIRMWARE_HEADER_UTILS__validate_fw_headers((uintptr_t) firmware_binary.data(), static_cast<uint32_t>(firmware_binary.size()), false,
+        &new_app_firmware_header, &new_core_firmware_header, NULL, firmware_type.value());
+    CHECK(HAILO_COMMON_STATUS__SUCCESS == fw_header_status, HAILO_INVALID_FIRMWARE,
+        "FW update validation failed with status {}", fw_header_status);
+
+    // TODO: Are we ok with doing another identify here?
+    auto board_info_before_update_expected = Control::identify(*this);
+    CHECK_EXPECTED_AS_STATUS(board_info_before_update_expected);
+    hailo_device_identity_t board_info_before_update = board_info_before_update_expected.release();
+
+    // Except on Hailo8 A0, the APP and CORE firmwares in the image must carry the same version.
+    if (board_info_before_update.device_architecture != HAILO_ARCH_HAILO8_A0) {
+        if ((new_app_firmware_header->firmware_major != new_core_firmware_header->firmware_major) ||
+            (new_app_firmware_header->firmware_minor != new_core_firmware_header->firmware_minor) ||
+            (GET_REVISION_NUMBER_VALUE(new_app_firmware_header->firmware_revision) != GET_REVISION_NUMBER_VALUE(new_core_firmware_header->firmware_revision))) {
+            LOGGER__ERROR("FW versions mismatch between APP and CORE firmwares.");
+            return HAILO_INVALID_FIRMWARE;
+        }
+    }
+
+    new_app_fw_version.firmware_major = new_app_firmware_header->firmware_major;
+    new_app_fw_version.firmware_minor = new_app_firmware_header->firmware_minor;
+    new_app_fw_version.firmware_revision = new_app_firmware_header->firmware_revision;
+
+    new_core_fw_version.firmware_major = new_core_firmware_header->firmware_major;
+    new_core_fw_version.firmware_minor = new_core_firmware_header->firmware_minor;
+    new_core_fw_version.firmware_revision = new_core_firmware_header->firmware_revision;
+
+    status = validate_fw_version_for_platform(board_info_before_update, new_app_fw_version, FW_BINARY_TYPE_APP_FIRMWARE);
+    CHECK_SUCCESS(status, "Invalid APP firmware binary was supplied");
+    status = validate_fw_version_for_platform(board_info_before_update, new_core_fw_version, FW_BINARY_TYPE_CORE_FIRMWARE);
+    CHECK_SUCCESS(status, "Invalid CORE firmware binary was supplied");
+
+    if (IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(new_app_firmware_header->firmware_revision) ||
+        IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(new_core_firmware_header->firmware_revision)) {
+        LOGGER__ERROR("Can't update to \"extended context switch buffer\" firmware (no ethernet support).");
+        return HAILO_INVALID_FIRMWARE;
+    }
+
+    // TODO: Fix cast, we are assuming they are the same (HRT-3177)
+    current_fw_version = reinterpret_cast<firmware_version_t*>(&(board_info_before_update.fw_version));
+
+    // Mask the current revision's flag bits (as done for the new version) so both
+    // versions are printed consistently.
+    LOGGER__INFO("Current Version: {}.{}.{}{}. Updating to version: {}.{}.{}{}", current_fw_version->firmware_major,
+        current_fw_version->firmware_minor, GET_REVISION_NUMBER_VALUE(current_fw_version->firmware_revision),
+        DEV_STRING_NOTE(board_info_before_update.is_release),
+        new_app_fw_version.firmware_major, new_app_fw_version.firmware_minor,
+        GET_REVISION_NUMBER_VALUE(new_app_fw_version.firmware_revision),
+        DEV_STRING_NOTE((!IS_REVISION_DEV(new_app_fw_version.firmware_revision))));
+
+    if (IS_REVISION_DEV(new_app_fw_version.firmware_revision)) {
+        LOGGER__INFO("New firmware version is a develop version, and may be unstable!");
+    }
+
+    if (FIRMWARE_HEADER_UTILS__is_binary_being_downgraded(current_fw_version, &new_app_fw_version)) {
+        LOGGER__INFO("Firmware is being downgraded.");
+    }
+
+    status = Control::start_firmware_update(*this);
+    CHECK_SUCCESS(status);
+    LOGGER__INFO("Update started.");
+
+    // Stream the image to the device in WRITE_CHUNK_SIZE chunks.
+    while (offset < firmware_binary.size()) {
+        chunk_size = MIN(WRITE_CHUNK_SIZE, (static_cast<uint32_t>(firmware_binary.size()) - offset));
+        LOGGER__DEBUG("Writing {} of data to offset {} / {}", chunk_size, offset, firmware_binary.size());
+        status = Control::write_firmware_update(*this, offset, firmware_binary.data() + offset, chunk_size);
+        CHECK_SUCCESS(status);
+        offset += chunk_size;
+    }
+    LOGGER__INFO("Finished writing.");
+
+    status = Control::validate_firmware_update(*this, &md5_sum, static_cast<uint32_t>(firmware_binary.size()));
+    CHECK_SUCCESS(status);
+
+    LOGGER__INFO("Firmware validation done.");
+
+    status = Control::finish_firmware_update(*this);
+    CHECK_SUCCESS(status);
+    LOGGER__INFO("Firmware update finished.");
+
+    if (should_reset) {
+        LOGGER__INFO("Resetting...");
+        status = reset(get_default_reset_mode());
+        // An unsupported-protocol error is tolerated here: the freshly installed
+        // firmware may speak a newer control protocol than this library.
+        // (A stale, duplicated re-check of fw_header_status was removed from this spot.)
+        CHECK((status == HAILO_SUCCESS) || (status == HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION), status);
+
+        auto board_info_after_install_expected = Control::identify(*this);
+        if (board_info_after_install_expected.status() == HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION) {
+            LOGGER__INFO("Successfully updated firmware. Protocol version has changed so firmware cannot be specified");
+            return HAILO_SUCCESS;
+        }
+
+        CHECK_EXPECTED_AS_STATUS(board_info_after_install_expected);
+        hailo_device_identity_t board_info_after_install = board_info_after_install_expected.release();
+
+        LOGGER__INFO("New App FW version: {}.{}.{}{}", board_info_after_install.fw_version.major, board_info_after_install.fw_version.minor,
+            board_info_after_install.fw_version.revision, DEV_STRING_NOTE(board_info_after_install.is_release));
+
+        // Validating that the new fw version is as expected
+        if ((board_info_after_install.fw_version.major != new_app_fw_version.firmware_major) ||
+            (board_info_after_install.fw_version.minor != new_app_fw_version.firmware_minor) ||
+            (GET_REVISION_NUMBER_VALUE(board_info_after_install.fw_version.revision) != GET_REVISION_NUMBER_VALUE(new_app_fw_version.firmware_revision))) {
+            LOGGER__WARNING("New App FW version is different than expected!");
+        }
+
+        if (board_info_after_install.device_architecture != HAILO_ARCH_HAILO8_A0) {
+            hailo_core_information_t core_info_after_install{};
+            status = Control::core_identify(*this, &core_info_after_install);
+            CHECK_SUCCESS(status);
+            LOGGER__INFO("New Core FW version: {}.{}.{}{}", core_info_after_install.fw_version.major, core_info_after_install.fw_version.minor,
+                core_info_after_install.fw_version.revision, DEV_STRING_NOTE(core_info_after_install.is_release));
+            // Compare the booted CORE firmware against the CORE binary's version
+            // (was mistakenly compared against the APP version).
+            if ((core_info_after_install.fw_version.major != new_core_fw_version.firmware_major) ||
+                (core_info_after_install.fw_version.minor != new_core_fw_version.firmware_minor) ||
+                (GET_REVISION_NUMBER_VALUE(core_info_after_install.fw_version.revision) != GET_REVISION_NUMBER_VALUE(new_core_fw_version.firmware_revision))) {
+                LOGGER__WARNING("New Core FW version is different than expected!");
+            }
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Update the second-stage bootloader on the device flash.
+ *
+ * Validates the binary's headers and minimum supported version, streams it into the
+ * device's internal memory in chunks, then asks the device to copy it to flash
+ * (verified against an MD5 of the whole binary).
+ *
+ * @param second_stage_binary        Pointer to the second-stage binary.
+ * @param second_stage_binary_length Size of the binary in bytes.
+ * @return HAILO_SUCCESS on success, otherwise a failure status.
+ */
+hailo_status DeviceBase::second_stage_update(uint8_t* second_stage_binary, uint32_t second_stage_binary_length)
+{
+    HAILO_COMMON_STATUS_t second_stage_header_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    hailo_status status = HAILO_UNINITIALIZED;
+    firmware_version_t new_second_stage_version = {};
+    firmware_version_t minimum_second_stage_version = {1, 1, 0};
+    uint32_t offset = 0;
+    uint32_t chunk_size = 0;
+    MD5_CTX md5_ctx = {};
+    MD5_SUM_t md5_sum = {};
+    firmware_header_t *new_second_stage_header = NULL;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(second_stage_binary);
+
+    // MD5 over the whole binary; the device verifies it before committing to flash.
+    MD5_Init(&md5_ctx);
+    MD5_Update(&md5_ctx, second_stage_binary, second_stage_binary_length);
+    MD5_Final(md5_sum, &md5_ctx);
+
+    const auto firmware_type = get_fw_type();
+    CHECK_EXPECTED_AS_STATUS(firmware_type);
+
+    second_stage_header_status = FIRMWARE_HEADER_UTILS__validate_second_stage_headers((uintptr_t) second_stage_binary,
+        second_stage_binary_length, &new_second_stage_header, firmware_type.value());
+    CHECK(HAILO_COMMON_STATUS__SUCCESS == second_stage_header_status, HAILO_INVALID_SECOND_STAGE,
+        "Second stage update validation failed with status {}", second_stage_header_status);
+
+    new_second_stage_version.firmware_major = new_second_stage_header->firmware_major;
+    new_second_stage_version.firmware_minor = new_second_stage_header->firmware_minor;
+    new_second_stage_version.firmware_revision = new_second_stage_header->firmware_revision;
+
+    status = validate_binary_version_for_platform(&new_second_stage_version,
+        &minimum_second_stage_version,
+        FW_BINARY_TYPE_SECOND_STAGE_BOOT);
+    CHECK_SUCCESS(status);
+
+    LOGGER__INFO("Updating to version: {}.{}.{}",
+        new_second_stage_version.firmware_major, new_second_stage_version.firmware_minor,
+        GET_REVISION_NUMBER_VALUE(new_second_stage_version.firmware_revision));
+
+    // Stream the binary in WRITE_CHUNK_SIZE chunks into the device's internal memory.
+    LOGGER__INFO("Writing second stage to internal memory");
+    while (offset < second_stage_binary_length) {
+        chunk_size = MIN(WRITE_CHUNK_SIZE, (second_stage_binary_length - offset));
+        LOGGER__INFO("Writing {} of data to offset {} / {}", chunk_size, offset, second_stage_binary_length);
+        status = Control::write_second_stage_to_internal_memory(*this, offset, second_stage_binary + offset, chunk_size);
+        CHECK_SUCCESS(status);
+        offset += chunk_size;
+    }
+    // The flash copy is the critical step - a power loss here can brick the device.
+    status = Control::copy_second_stage_to_flash(*this, &md5_sum, second_stage_binary_length);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__CRITICAL("Second stage failed in a critical stage, Please contact Hailo support and DO NOT power off the device");
+    }
+    CHECK_SUCCESS(status);
+
+    LOGGER__INFO("Finished copying second stage to flash.");
+
+    return HAILO_SUCCESS;
+}
+
+// Parse a sensor config file and store its control buffers in the given flash section.
+// The ISP section is reserved - use store_isp_config for ISP configuration.
+hailo_status DeviceBase::store_sensor_config(uint32_t section_index, hailo_sensor_types_t sensor_type,
+    uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+    const std::string &config_file_path, const std::string &config_name)
+{
+    CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
+        "Cannot store sensor config in invalid section {}. Please choose section index (0-{}).", section_index, MAX_NON_ISP_SECTIONS);
+    CHECK(sensor_type != HAILO_SENSOR_TYPES_HAILO8_ISP, HAILO_INVALID_ARGUMENT,
+        "store_sensor_config intended only for sensor config, for ISP config use store_isp");
+
+    auto parsed_buffers = SensorConfigUtils::read_config_file(config_file_path);
+    CHECK_EXPECTED_AS_STATUS(parsed_buffers, "Failed reading config file");
+
+    return store_sensor_control_buffers(parsed_buffers.value(), section_index, sensor_type,
+        reset_config_size, config_height, config_width, config_fps, config_name);
+}
+
+// Parse the ISP static+runtime config files and store them in the dedicated ISP section.
+hailo_status DeviceBase::store_isp_config(uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+    const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path, const std::string &config_name)
+{
+    auto parsed_buffers = SensorConfigUtils::read_isp_config_file(isp_static_config_file_path, isp_runtime_config_file_path);
+    CHECK_EXPECTED_AS_STATUS(parsed_buffers, "Failed reading ISP config file");
+
+    return store_sensor_control_buffers(parsed_buffers.value(), SENSOR_CONFIG__ISP_SECTION_INDEX, HAILO_SENSOR_TYPES_HAILO8_ISP,
+        reset_config_size, config_height, config_width, config_fps, config_name);
+}
+
+// Fetch the sensor sections info table from the device into a freshly allocated buffer.
+Expected<Buffer> DeviceBase::sensor_get_sections_info()
+{
+    auto sections_info = Buffer::create(SENSOR_SECTIONS_INFO_SIZE);
+    CHECK_EXPECTED(sections_info);
+
+    const hailo_status status = Control::sensor_get_sections_info(*this, sections_info->data());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return sections_info;
+}
+
+// Read back the configuration stored in the given flash section and dump it to a CSV
+// file. The data is fetched from the device in MAX_CONFIG_ENTRIES_DATA_SIZE chunks,
+// followed by one read for the remainder.
+hailo_status DeviceBase::sensor_dump_config(uint32_t section_index, const std::string &config_file_path)
+{
+    CHECK(SENSOR_CONFIG__TOTAL_SECTIONS_BLOCK_COUNT > section_index, HAILO_INVALID_ARGUMENT, "Section {} is invalid. Section index must be in the range [0 - {}]", section_index, (SENSOR_CONFIG__TOTAL_SECTIONS_BLOCK_COUNT - 1));
+    auto sections_info_buffer = sensor_get_sections_info();
+    CHECK_EXPECTED_AS_STATUS(sections_info_buffer);
+
+    SENSOR_CONFIG__section_info_t *section_info_ptr = &((SENSOR_CONFIG__section_info_t *)sections_info_buffer->data())[section_index];
+    CHECK(section_info_ptr->is_free == 0, HAILO_NOT_FOUND, "Section {} is not active", section_index);
+    // The section must contain a whole number of operation_cfg entries.
+    CHECK(0 == (section_info_ptr->config_size % sizeof(SENSOR_CONFIG__operation_cfg_t)), HAILO_INVALID_OPERATION, "Section config size is invalid.");
+
+    /* Read config data from device */
+    auto operation_cfg = Buffer::create(section_info_ptr->config_size);
+    CHECK_EXPECTED_AS_STATUS(operation_cfg);
+
+    size_t read_full_buffer_count = (section_info_ptr->config_size / MAX_CONFIG_ENTRIES_DATA_SIZE);
+    uint32_t residue_to_read = static_cast<uint32_t>(section_info_ptr->config_size - (read_full_buffer_count * MAX_CONFIG_ENTRIES_DATA_SIZE));
+    uint32_t entries_count = (section_info_ptr->config_size / static_cast<uint32_t>(sizeof(SENSOR_CONFIG__operation_cfg_t)));
+    uint32_t offset = 0;
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    // Full-size chunk reads, then one read for whatever is left.
+    for (uint32_t i = 0; i < read_full_buffer_count; i++) {
+        status = Control::sensor_get_config(*this, section_index, offset, (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE, (operation_cfg->data() + offset));
+        CHECK_SUCCESS(status);
+        offset += static_cast<uint32_t>(MAX_CONFIG_ENTRIES_DATA_SIZE);
+    }
+    if (0 < residue_to_read) {
+        status = Control::sensor_get_config(*this, section_index, offset, residue_to_read, (operation_cfg->data() + offset));
+        CHECK_SUCCESS(status);
+    }
+
+    status = SensorConfigUtils::dump_config_to_csv((SENSOR_CONFIG__operation_cfg_t*)operation_cfg->data(), config_file_path, entries_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Thin passthrough: set the I2C bus index used for the given sensor type.
+hailo_status DeviceBase::sensor_set_i2c_bus_index(hailo_sensor_types_t sensor_type, uint32_t bus_index)
+{
+    return Control::sensor_set_i2c_bus_index(*this, sensor_type, bus_index);
+}
+
+// Load the sensor configuration stored in the given (non-ISP) section and start it.
+hailo_status DeviceBase::sensor_load_and_start_config(uint32_t section_index)
+{
+    CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
+        "Cannot load config from invalid section index {}. Please choose section index (0-{}).",
+        section_index, MAX_NON_ISP_SECTIONS);
+    return Control::sensor_load_and_start_config(*this, section_index);
+}
+
+// Reset the sensor associated with the given (non-ISP) section.
+hailo_status DeviceBase::sensor_reset(uint32_t section_index)
+{
+    CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
+        "Cannot reset sensor in invalid section index {}. Please choose section index (0-{}).",
+        section_index, MAX_NON_ISP_SECTIONS);
+    return Control::sensor_reset(*this, section_index);
+}
+
+// Thin passthrough: configure a generic I2C slave on the device.
+hailo_status DeviceBase::sensor_set_generic_i2c_slave(uint16_t slave_address, uint8_t offset_size, uint8_t bus_index,
+    uint8_t should_hold_bus, uint8_t slave_endianness)
+{
+    return Control::sensor_set_generic_i2c_slave(*this, slave_address, offset_size, bus_index, should_hold_bus, slave_endianness);
+}
+
+// Read the board configuration from the device into a zero-initialized buffer of
+// the full board-config size.
+Expected<Buffer> DeviceBase::read_board_config()
+{
+    auto board_config = Buffer::create(BOARD_CONFIG_SIZE, 0);
+    CHECK_EXPECTED(board_config);
+
+    const auto status = Control::read_board_config(*this, board_config->data(), static_cast<uint32_t>(board_config->size()));
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return board_config.release();
+}
+
+// Thin passthrough: write the given buffer as the device's board configuration.
+hailo_status DeviceBase::write_board_config(const MemoryView &buffer)
+{
+    return Control::write_board_config(*this, buffer.data(), static_cast<uint32_t>(buffer.size()));
+}
+
+// Query metadata about the user configuration stored on the device (e.g. its total size).
+Expected<hailo_fw_user_config_information_t> DeviceBase::examine_user_config()
+{
+    hailo_fw_user_config_information_t user_config_info{};
+    const auto status = Control::examine_user_config(*this, &user_config_info);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return user_config_info;
+}
+
+// Read the user configuration from the device. Its size is dynamic, so it is
+// queried first via examine_user_config() and the buffer is sized accordingly.
+Expected<Buffer> DeviceBase::read_user_config()
+{
+    auto user_config_info = examine_user_config();
+    CHECK_EXPECTED(user_config_info, "Failed to examine user config");
+
+    auto user_config = Buffer::create(user_config_info->total_size, 0);
+    CHECK_EXPECTED(user_config);
+
+    const auto status = Control::read_user_config(*this, user_config->data(), static_cast<uint32_t>(user_config->size()));
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return user_config.release();
+}
+
+// Thin passthrough: write the given buffer as the device's user configuration.
+hailo_status DeviceBase::write_user_config(const MemoryView &buffer)
+{
+    return Control::write_user_config(*this, buffer.data(), static_cast<uint32_t>(buffer.size()));
+}
+
+// Thin passthrough: erase the user configuration stored on the device.
+hailo_status DeviceBase::erase_user_config()
+{
+    return Control::erase_user_config(*this);
+}
+
+// Launch the thread that processes D2H events from the queue and dispatches them
+// to registered callbacks (see d2h_notification_thread_main).
+void DeviceBase::start_d2h_notification_thread(const std::string &device_id)
+{
+    m_d2h_notification_thread = std::thread([this, device_id]() {
+        OsUtils::set_current_thread_name("NOTIFY_PROC");
+        d2h_notification_thread_main(device_id);
+    });
+}
+
+// Wake the processing thread with a TERMINATE sentinel event and join it.
+// The queue is cleared first so the sentinel is consumed promptly.
+void DeviceBase::stop_d2h_notification_thread()
+{
+    static const D2H_EVENT_MESSAGE_t TERMINATE {{0, 0, 0, 0, TERMINATE_EVENT_ID, 0, 0}, {}};
+    m_d2h_notification_queue.clear();
+    if (m_d2h_notification_thread.joinable()) {
+        m_d2h_notification_queue.push(TERMINATE);
+        m_d2h_notification_thread.join();
+    }
+}
+
+/**
+ * Main loop of the D2H notification processing thread.
+ *
+ * Pops events from the queue, parses them, translates the firmware event id into a
+ * public hailo_notification_id_t, and invokes the user callback registered for that id
+ * (if any). Exits when the TERMINATE sentinel pushed by stop_d2h_notification_thread
+ * is popped.
+ *
+ * Fix: the address-of operator in two calls had been corrupted into the mojibake
+ * "¬ification" (HTML-entity mangling of "&notification") - restored to "&notification".
+ */
+void DeviceBase::d2h_notification_thread_main(const std::string &device_id)
+{
+    while (true) {
+        auto notification = m_d2h_notification_queue.pop();
+        if (notification.header.event_id == TERMINATE_EVENT_ID) {
+            LOGGER__DEBUG("[{}] D2H notification thread got terminate signal, returning..", device_id);
+            return;
+        }
+        /* Parse and print the Event info */
+        auto d2h_status = D2H_EVENTS__parse_event(&notification);
+        if (HAILO_COMMON_STATUS__SUCCESS != d2h_status) {
+            LOGGER__ERROR("[{}] Fail to Parse firmware notification {} status is {}", device_id, notification.header.event_id, d2h_status);
+            continue;
+        }
+
+        hailo_notification_t callback_notification;
+        uint32_t notification_fw_id = notification.header.event_id;
+        hailo_notification_id_t hailo_notification_id;
+        hailo_status status = fw_notification_id_to_hailo((D2H_EVENT_ID_t)notification_fw_id, &hailo_notification_id);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("[{}] Got invalid notification id from fw: {}", device_id, notification_fw_id);
+            continue;
+        }
+
+        LOGGER__INFO("[{}] Got notification from fw with id: {}", device_id, hailo_notification_id);
+
+        std::shared_ptr<NotificationCallback> callback_func = nullptr;
+        void *callback_opaque = nullptr;
+        {
+            const std::lock_guard<std::mutex> lock(m_callbacks_lock);
+            callback_func = m_d2h_callbacks[hailo_notification_id].func;
+            callback_opaque = m_d2h_callbacks[hailo_notification_id].opaque;
+            // The lock is deliberately released before invoking the callback: the user
+            // callback may call back into this object and try to take m_callbacks_lock
+            // again, which would deadlock. A recursive_mutex was avoided because of its
+            // overhead.
+        }
+
+        if (nullptr != callback_func) {
+            callback_notification.id = hailo_notification_id;
+            callback_notification.sequence = notification.header.sequence;
+            static_assert(sizeof(callback_notification.body) == sizeof(notification.message_parameters), "D2H notification size mismatch");
+            memcpy(&callback_notification.body, &notification.message_parameters, sizeof(notification.message_parameters));
+            (*callback_func)(*this, callback_notification, callback_opaque);
+        }
+    }
+}
+
+// Verify that the HEF was compiled for an architecture this device can run, and warn
+// about configurations that only degrade performance (clock mismatch, 8L-on-8).
+hailo_status DeviceBase::check_hef_is_compatible(Hef &hef)
+{
+    const auto device_arch = get_architecture();
+    CHECK_EXPECTED_AS_STATUS(device_arch, "Can't get device architecture (is the FW loaded?)");
+
+    if (!is_hef_compatible(device_arch.value(), hef.pimpl->get_device_arch())) {
+        auto device_arch_str = HailoRTCommon::get_device_arch_str(device_arch.value());
+        auto hef_arch_str = HailoRTCommon::get_device_arch_str(hef_arch_to_device_arch(hef.pimpl->get_device_arch()));
+
+        LOGGER__ERROR("HEF format is not compatible with device. Device arch: {}, HEF arch: {}",
+            device_arch_str.c_str(), hef_arch_str.c_str());
+        return HAILO_INVALID_HEF;
+    }
+
+    // TODO: MSW-227 check clock rate for hailo15 as well.
+    // A clock-rate mismatch is warning-only (affects FPS accuracy, not correctness).
+    if ((HAILO_ARCH_HAILO8 == device_arch.value()) || (HAILO_ARCH_HAILO8L == device_arch.value())) {
+        auto extended_device_info_expected = Control::get_extended_device_information(*this);
+        CHECK_EXPECTED_AS_STATUS(extended_device_info_expected, "Can't get device extended info");
+        hailo_extended_device_information_t extended_device_information = extended_device_info_expected.release();
+        check_clock_rate_for_hailo8(extended_device_information.neural_network_core_clock_rate,
+            hef.pimpl->get_device_arch());
+    }
+
+    if ((ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == hef.pimpl->get_device_arch()) && (HAILO_ARCH_HAILO8 == device_arch.value())) {
+        LOGGER__WARNING(
+            "HEF was compiled for Hailo8L device, while the device itself is Hailo8. " \
+            "This will result in lower performance.");
+    }
+
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Translate a firmware D2H event id into the public hailo_notification_id_t.
+ *
+ * @param fw_notification_id    Event id as reported by the firmware.
+ * @param hailo_notification_id Out: matching public notification id (untouched on failure).
+ * @return HAILO_SUCCESS, or HAILO_INVALID_ARGUMENT for an unknown event id.
+ *
+ * Refactor: replaced the goto/l_exit pattern with direct returns.
+ */
+hailo_status DeviceBase::fw_notification_id_to_hailo(D2H_EVENT_ID_t fw_notification_id,
+    hailo_notification_id_t* hailo_notification_id)
+{
+    switch (fw_notification_id) {
+    case ETHERNET_SERVICE_RX_ERROR_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_ETHERNET_RX_ERROR;
+        break;
+    case D2H_HOST_INFO_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_DEBUG;
+        break;
+    case HEALTH_MONITOR_TEMPERATURE_ALARM_D2H_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_TEMPERATURE_ALARM;
+        break;
+    case HEALTH_MONITOR_CLOSED_STREAMS_D2H_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_DATAFLOW_SHUTDOWN;
+        break;
+    case HEALTH_MONITOR_OVERCURRENT_PROTECTION_ALERT_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM;
+        break;
+    case HEALTH_MONITOR_LCU_ECC_CORRECTABLE_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_LCU_ECC_CORRECTABLE_ERROR;
+        break;
+    case HEALTH_MONITOR_LCU_ECC_UNCORRECTABLE_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_LCU_ECC_UNCORRECTABLE_ERROR;
+        break;
+    case HEALTH_MONITOR_CPU_ECC_ERROR_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_CPU_ECC_ERROR;
+        break;
+    case HEALTH_MONITOR_CPU_ECC_FATAL_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_CPU_ECC_FATAL;
+        break;
+    case CONTEXT_SWITCH_BREAKPOINT_REACHED:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_CONTEXT_SWITCH_BREAKPOINT_REACHED;
+        break;
+    case HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID:
+        *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_CLOCK_CHANGED_EVENT;
+        break;
+    default:
+        // Unknown firmware event id - leave the output parameter untouched.
+        return HAILO_INVALID_ARGUMENT;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Map the firmware-header-utils binary version check onto a hailo_status
+// (HAILO_INVALID_FIRMWARE on failure).
+hailo_status DeviceBase::validate_binary_version_for_platform(firmware_version_t *new_binary_version,
+    firmware_version_t *min_supported_binary_version, FW_BINARY_TYPE_t fw_binary_type)
+{
+    HAILO_COMMON_STATUS_t binary_status = FIRMWARE_HEADER_UTILS__validate_binary_version(new_binary_version, min_supported_binary_version,
+        fw_binary_type);
+    CHECK(HAILO_COMMON_STATUS__SUCCESS == binary_status, HAILO_INVALID_FIRMWARE,
+        "FW binary version validation failed with status {}", binary_status);
+    return HAILO_SUCCESS;
+}
+
+// Validate a firmware version against the minimum supported for this board's part
+// number: mPCIe modules require {2,2,0}; every other part number (EVB, M.2, unknown)
+// falls back to the EVB/M.2 minimum {2,1,0} - same behavior as the original branches.
+hailo_status DeviceBase::validate_fw_version_for_platform(const hailo_device_identity_t &board_info, firmware_version_t fw_version, FW_BINARY_TYPE_t fw_binary_type)
+{
+    const firmware_version_t evb_mdot2_min_version = {2, 1, 0};
+    const firmware_version_t mpcie_min_version = {2, 2, 0};
+
+    firmware_version_t min_supported_fw_version = evb_mdot2_min_version;
+    if (0 == strncmp(MPCIE_PART_NUMBER_PREFIX, board_info.part_number, PART_NUMBER_PREFIX_LENGTH)) {
+        min_supported_fw_version = mpcie_min_version;
+    }
+
+    return validate_binary_version_for_platform(&fw_version, &min_supported_fw_version, fw_binary_type);
+}
+
+// Each device architecture accepts a specific set of compiler-side (HEF) architectures.
+bool DeviceBase::is_hef_compatible(hailo_device_architecture_t device_arch, ProtoHEFHwArch hef_arch)
+{
+    switch (device_arch) {
+    case HAILO_ARCH_HAILO8:
+        // Hailo8 also runs HEFs compiled for the cut-down Hailo8L variant.
+        return (PROTO__HW_ARCH__HAILO8P == hef_arch) || (PROTO__HW_ARCH__HAILO8R == hef_arch) ||
+            (PROTO__HW_ARCH__HAILO8L == hef_arch);
+    case HAILO_ARCH_HAILO8L:
+        return (PROTO__HW_ARCH__HAILO8L == hef_arch);
+    case HAILO_ARCH_HAILO15:
+        // Compare with HW_ARCH__LAVENDER and HW_ARCH__GINGER to support hefs compiled for them
+        return (PROTO__HW_ARCH__GINGER == hef_arch) || (PROTO__HW_ARCH__LAVENDER == hef_arch) ||
+            (PROTO__HW_ARCH__HAILO15H == hef_arch);
+    default:
+        return false;
+    }
+}
+
+// Map the HEF (compiler-side) architecture enum onto the runtime device architecture.
+hailo_device_architecture_t DeviceBase::hef_arch_to_device_arch(ProtoHEFHwArch hef_arch)
+{
+    if (PROTO__HW_ARCH__SAGE_A0 == hef_arch) {
+        return HAILO_ARCH_HAILO8_A0;
+    }
+    if ((PROTO__HW_ARCH__HAILO8 == hef_arch) || (PROTO__HW_ARCH__HAILO8P == hef_arch) ||
+        (PROTO__HW_ARCH__HAILO8R == hef_arch) || (PROTO__HW_ARCH__SAGE_B0 == hef_arch) ||
+        (PROTO__HW_ARCH__PAPRIKA_B0 == hef_arch)) {
+        return HAILO_ARCH_HAILO8;
+    }
+    if (PROTO__HW_ARCH__HAILO8L == hef_arch) {
+        return HAILO_ARCH_HAILO8L;
+    }
+    if ((PROTO__HW_ARCH__HAILO15H == hef_arch) || (PROTO__HW_ARCH__GINGER == hef_arch) ||
+        (PROTO__HW_ARCH__LAVENDER == hef_arch)) {
+        return HAILO_ARCH_HAILO15;
+    }
+    // Unknown / unsupported HEF architecture.
+    return HAILO_ARCH_MAX_ENUM;
+}
+
+// Warns (does not fail) when the device clock rate differs from the rate the HEF
+// was compiled for, since FPS estimates embedded in the HEF would then be off.
+void DeviceBase::check_clock_rate_for_hailo8(uint32_t clock_rate, ProtoHEFHwArch hef_hw_arch)
+{
+ uint32_t expected_clock_rate = (hef_hw_arch == ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8R) ? HAILO8R_CLOCK_RATE : HAILO8_CLOCK_RATE;
+ if (expected_clock_rate != clock_rate) {
+ LOGGER__WARNING(
+ "HEF was compiled assuming clock rate of {} MHz, while the device clock rate is {} MHz. " \
+ "FPS calculations might not be accurate.",
+ (expected_clock_rate / CLOCKS_IN_MHZ),
+ (clock_rate / CLOCKS_IN_MHZ));
+ }
+}
+
+// Streams a sensor configuration to the device in chunks via Control::sensor_store_config.
+// Full-size chunks are sent first; a final partial chunk carries the remainder.
+// is_first signals the firmware to (re)initialize the section on the first chunk.
+// NOTE(review): config_info_full_buffer divides an ENTRY count by MAX_CONFIG_INFO_ENTRIES,
+// while offset advances in BYTES by MAX_CONFIG_ENTRIES_DATA_SIZE — confirm the two macros
+// are unit-consistent (entries vs. bytes) for configs that span multiple chunks.
+hailo_status DeviceBase::store_sensor_control_buffers(const std::vector<SENSOR_CONFIG__operation_cfg_t> &control_buffers, uint32_t section_index, hailo_sensor_types_t sensor_type,
+ uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps, const std::string &config_name)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ uint32_t total_data_size = static_cast<uint32_t>(control_buffers.size() * sizeof(control_buffers[0]));
+ size_t config_info_full_buffer = control_buffers.size() / MAX_CONFIG_INFO_ENTRIES;
+ uint32_t is_first = 1;
+ uint32_t offset = 0;
+
+ for(uint32_t i = 0; i < config_info_full_buffer; i++) {
+ status = Control::sensor_store_config(*this, is_first, section_index, offset, reset_config_size, sensor_type, total_data_size,
+ (uint8_t*)control_buffers.data() + offset, (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE,
+ config_height, config_width, config_fps, static_cast<uint32_t>(config_name.size()), (uint8_t*)config_name.c_str());
+ CHECK_SUCCESS(status, "Failed to store sensor config");
+
+ offset += (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE;
+ is_first = 0;
+ }
+
+ // Send the remaining (partial) chunk, if any data is left.
+ if (offset < total_data_size) {
+ status = Control::sensor_store_config(*this, is_first, section_index, offset, reset_config_size, sensor_type, total_data_size,
+ (uint8_t*)control_buffers.data() + offset, total_data_size - offset,
+ config_height, config_width, config_fps, static_cast<uint32_t>(config_name.size()), (uint8_t*)config_name.c_str());
+ CHECK_SUCCESS(status,"Failed to store sensor config");
+ }
+
+ return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file device_internal.hpp
+ * @brief Class declaration for DeviceBase that implements the basic Device "interface" (not technically
+ * an interface, but good enough). All internal devices should inherit from the DeviceBase class.
+ * Hence, the hierarchy is as follows:
+ *
+ * Device (External "interface")
+ * |-- DeviceBase (Base classes)
+ * |-- VdmaDevice
+ * | |-- PcieDevice
+ * | |-- IntegratedDevice
+ * |-- EthernetDevice
+ **/
+
+#ifndef _HAILO_DEVICE_INTERNAL_HPP_
+#define _HAILO_DEVICE_INTERNAL_HPP_
+
+#include "hailo/device.hpp"
+#include "hailo/hailort.h"
+
+#include "d2h_event_queue.hpp"
+#include "hef/hef_internal.hpp"
+
+#include "firmware_header.h"
+#include "firmware_header_utils.h"
+#include "control_protocol.h"
+#include <thread>
+
+
+namespace hailort
+{
+
+#define EVB_PART_NUMBER_PREFIX ("HEV18B1C4GA")
+#define MDOT2_PART_NUMBER_PREFIX ("HM218B1C2FA")
+#define MPCIE_PART_NUMBER_PREFIX ("HMP1RB1C2GA")
+
+// Will be used to perform generic validation for all variations of a specific module
+#define PART_NUMBER_PREFIX_LENGTH (11)
+
+#define CLOCKS_IN_MHZ (1000 * 1000)
+
+// Common base implementation of the Device interface shared by all internal device
+// flavours (PCIe / Integrated / Ethernet). Owns the D2H notification machinery:
+// a fetch thread that reads notifications from the device and a dispatch thread
+// that invokes user callbacks.
+class DeviceBase : public Device
+{
+public:
+ DeviceBase(Type type);
+ // Non-copyable and non-movable: owns running threads bound to `this`.
+ DeviceBase(DeviceBase &&) = delete;
+ DeviceBase(const DeviceBase &) = delete;
+ DeviceBase &operator=(DeviceBase &&) = delete;
+ DeviceBase &operator=(const DeviceBase &) = delete;
+ virtual ~DeviceBase();
+
+ virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
+ const NetworkGroupsParamsMap &configure_params={}) override;
+ virtual hailo_status reset(hailo_reset_device_mode_t mode) override;
+ virtual hailo_status set_notification_callback(const NotificationCallback &func, hailo_notification_id_t notification_id, void *opaque) override;
+ virtual hailo_status remove_notification_callback(hailo_notification_id_t notification_id) override;
+ virtual void activate_notifications(const std::string &device_id);
+ virtual void start_notification_fetch_thread(D2hEventQueue *write_queue);
+ virtual hailo_status stop_notification_fetch_thread();
+ virtual hailo_status firmware_update(const MemoryView &firmware_binary, bool should_reset) override;
+ virtual hailo_status second_stage_update(uint8_t *second_stage_binary, uint32_t second_stage_binary_length) override;
+ virtual hailo_status store_sensor_config(uint32_t section_index, hailo_sensor_types_t sensor_type,
+ uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+ const std::string &config_file_path, const std::string &config_name) override;
+ virtual hailo_status store_isp_config(uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
+ const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path, const std::string &config_name) override;
+ virtual Expected<Buffer> sensor_get_sections_info() override;
+ virtual hailo_status sensor_dump_config(uint32_t section_index, const std::string &config_file_path) override;
+ virtual hailo_status sensor_set_i2c_bus_index(hailo_sensor_types_t sensor_type, uint32_t bus_index) override;
+ virtual hailo_status sensor_load_and_start_config(uint32_t section_index) override;
+ virtual hailo_status sensor_reset(uint32_t section_index) override;
+ virtual hailo_status sensor_set_generic_i2c_slave(uint16_t slave_address, uint8_t offset_size, uint8_t bus_index,
+ uint8_t should_hold_bus, uint8_t slave_endianness) override;
+ virtual Expected<Buffer> read_board_config() override;
+ virtual hailo_status write_board_config(const MemoryView &buffer) override;
+ virtual Expected<hailo_fw_user_config_information_t> examine_user_config() override;
+ virtual Expected<Buffer> read_user_config() override;
+ virtual hailo_status write_user_config(const MemoryView &buffer) override;
+ virtual hailo_status erase_user_config() override;
+ static hailo_device_architecture_t hef_arch_to_device_arch(ProtoHEFHwArch hef_arch);
+
+protected:
+ // State shared between the owner and the notification-fetch thread.
+ // NOTE(review): is_running is read/written from multiple threads without
+ // synchronization — consider std::atomic<bool>; confirm against usage.
+ struct NotificationThreadSharedParams {
+ NotificationThreadSharedParams() : is_running(false) {}
+ D2hEventQueue *write_queue;
+ bool is_running;
+ };
+
+ // Special value to signal the d2h notification thread to terminate
+ static const uint32_t TERMINATE_EVENT_ID = std::numeric_limits<uint32_t>::max();
+
+ virtual hailo_reset_device_mode_t get_default_reset_mode() = 0;
+ virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) = 0;
+ virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() = 0;
+ virtual hailo_status disable_notifications() = 0;
+ void start_d2h_notification_thread(const std::string &device_id);
+ void stop_d2h_notification_thread();
+ void d2h_notification_thread_main(const std::string &device_id);
+ hailo_status check_hef_is_compatible(Hef &hef);
+
+ virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) = 0;
+
+ D2hEventQueue m_d2h_notification_queue;
+ std::thread m_d2h_notification_thread;
+ std::thread m_notification_fetch_thread;
+ std::shared_ptr<NotificationThreadSharedParams> m_notif_fetch_thread_params;
+
+private:
+ friend class VDeviceBase;
+
+ static hailo_status fw_notification_id_to_hailo(D2H_EVENT_ID_t fw_notification_id,
+ hailo_notification_id_t* hailo_notification_id);
+ static hailo_status validate_binary_version_for_platform(firmware_version_t *new_binary_version,
+ firmware_version_t *min_supported_binary_version, FW_BINARY_TYPE_t fw_binary_type);
+ static hailo_status validate_fw_version_for_platform(const hailo_device_identity_t &board_info,
+ firmware_version_t fw_version, FW_BINARY_TYPE_t fw_binary_type);
+ static bool is_hef_compatible(hailo_device_architecture_t device_arch, ProtoHEFHwArch hw_arch);
+ static void check_clock_rate_for_hailo8(uint32_t clock_rate, ProtoHEFHwArch hef_hw_arch);
+ hailo_status store_sensor_control_buffers(const std::vector<SENSOR_CONFIG__operation_cfg_t> &control_buffers, uint32_t section_index, hailo_sensor_types_t sensor_type,
+ uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps, const std::string &config_name);
+ virtual void notification_fetch_thread(std::shared_ptr<NotificationThreadSharedParams> params);
+ Expected<firmware_type_t> get_fw_type();
+
+ // Per-notification-id user callback and its opaque context pointer.
+ typedef struct {
+ std::shared_ptr<NotificationCallback> func;
+ void *opaque;
+ } d2h_notification_callback_t;
+
+ d2h_notification_callback_t m_d2h_callbacks[HAILO_NOTIFICATION_ID_COUNT];
+ std::mutex m_callbacks_lock;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_DEVICE_INTERNAL_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file device_internal.cpp
- * @brief Implementation of DeviceBase class
- **/
-
-#include "device_internal.hpp"
-#include "hailo/hailort.h"
-#include "control.hpp"
-#include "sensor_config_utils.hpp"
-
-namespace hailort
-{
-
-// Constructor: initializes notification state; warns when built without NDEBUG
-// (debug build) or with HAILO_EMULATOR, since both affect performance/behavior.
-DeviceBase::DeviceBase(Type type) :
- Device::Device(type),
- m_d2h_notification_queue(),
- m_d2h_notification_thread(),
- m_notif_fetch_thread_params(make_shared_nothrow<NotificationThreadSharedParams>()),
- m_d2h_callbacks{{0,0}},
- m_callbacks_lock()
- // TODO: Handle m_notif_fetch_thread_params null pointer
-{
-#ifndef NDEBUG
- LOGGER__WARNING("libhailort is running in \"debug\" mode. Overall performance might be affected!");
-#endif
-#ifdef HAILO_EMULATOR
- LOGGER__WARNING("libhailort is running in \"Emulator\" mode.");
-#endif
-}
-
-// Destructor: joins the D2H dispatch thread so no callback runs on a destroyed object.
-DeviceBase::~DeviceBase()
-{
- stop_d2h_notification_thread();
-}
-
-// Configures the device with the given HEF after verifying the HEF architecture
-// is compatible with this device; logs the total configuration time.
-Expected<ConfiguredNetworkGroupVector> DeviceBase::configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params)
-{
- auto start_time = std::chrono::steady_clock::now();
-
- auto status = check_hef_is_compatible(hef);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- // Delegated to the concrete device type (PCIe / Eth / Integrated).
- auto network_groups = add_hef(hef, configure_params);
- CHECK_EXPECTED(network_groups);
-
- auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
- LOGGER__INFO("Configuring HEF took {} milliseconds", elapsed_time_ms);
-
- return network_groups;
-}
-
-// Translates the public reset mode enum to the control-protocol reset type and
-// forwards to the transport-specific reset_impl(). Unknown modes are rejected.
-hailo_status DeviceBase::reset(hailo_reset_device_mode_t mode)
-{
- CONTROL_PROTOCOL__reset_type_t reset_type = CONTROL_PROTOCOL__RESET_TYPE__COUNT; // set invalid value
- switch(mode) {
- case HAILO_RESET_DEVICE_MODE_CHIP:
- reset_type = CONTROL_PROTOCOL__RESET_TYPE__CHIP;
- break;
- case HAILO_RESET_DEVICE_MODE_NN_CORE:
- reset_type = CONTROL_PROTOCOL__RESET_TYPE__NN_CORE;
- break;
- case HAILO_RESET_DEVICE_MODE_SOFT:
- reset_type = CONTROL_PROTOCOL__RESET_TYPE__SOFT;
- break;
- case HAILO_RESET_DEVICE_MODE_FORCED_SOFT:
- reset_type = CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT;
- break;
- default:
- return HAILO_INVALID_ARGUMENT;
- }
- return reset_impl(reset_type);
-}
-
-// Registers (or replaces) the user callback for a given notification id.
-// The callback is stored as a shared_ptr so dispatch can safely hold a reference
-// while the registration table is modified concurrently.
-hailo_status DeviceBase::set_notification_callback(const NotificationCallback &func, hailo_notification_id_t notification_id, void *opaque)
-{
- CHECK((0 <= notification_id) && (HAILO_NOTIFICATION_ID_COUNT > notification_id), HAILO_INVALID_ARGUMENT,
- "Notification id value is invalid");
- CHECK_ARG_NOT_NULL(func);
-
- auto func_ptr = make_shared_nothrow<NotificationCallback>(func);
- CHECK_NOT_NULL(func_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- const std::lock_guard<std::mutex> lock(m_callbacks_lock);
- m_d2h_callbacks[notification_id].func = func_ptr;
- m_d2h_callbacks[notification_id].opaque = opaque;
- return HAILO_SUCCESS;
-}
-
-// Unregisters the callback for a given notification id (no-op if none was set).
-hailo_status DeviceBase::remove_notification_callback(hailo_notification_id_t notification_id)
-{
- CHECK((0 <= notification_id) && (HAILO_NOTIFICATION_ID_COUNT > notification_id), HAILO_INVALID_ARGUMENT,
- "Notification id value is invalid");
-
- const std::lock_guard<std::mutex> lock(m_callbacks_lock);
- m_d2h_callbacks[notification_id].func = nullptr;
- m_d2h_callbacks[notification_id].opaque = nullptr;
-
- return HAILO_SUCCESS;
-}
-
-// Starts both notification threads: the dispatcher (pops from the queue and calls
-// user callbacks) and the fetcher (reads from the device and pushes to the queue).
-void DeviceBase::activate_notifications(const std::string &device_id)
-{
- this->start_d2h_notification_thread(device_id);
- this->start_notification_fetch_thread(&m_d2h_notification_queue);
-}
-
-// Stops the fetch thread: clears is_running, disables notifications on the device
-// (best effort — failure is logged and reported but does not skip the join), then
-// joins the thread so it never outlives the object.
-// NOTE(review): is_running is a plain bool shared across threads — presumably
-// visibility relies on the following blocking call; consider std::atomic<bool>.
-hailo_status DeviceBase::stop_notification_fetch_thread()
-{
- hailo_status status = HAILO_SUCCESS; // best effort
-
- if (m_notif_fetch_thread_params->is_running) {
- m_notif_fetch_thread_params->is_running = false;
- auto disable_status = this->disable_notifications();
- if (HAILO_SUCCESS != disable_status) {
- status = disable_status;
- LOGGER__WARNING("Failed disabling notifications using ioctl command");
- }
- }
-
- // join thread even if disable_notifications failed - so we don't have non-joined thread
- if (m_notification_fetch_thread.joinable()) {
- m_notification_fetch_thread.join();
- }
-
- return status;
-}
-
-// Launches the fetch thread that reads device notifications into write_queue.
-// Shared params are passed by shared_ptr so they outlive either side.
-void DeviceBase::start_notification_fetch_thread(D2hEventQueue *write_queue)
-{
- m_notif_fetch_thread_params->write_queue = write_queue;
- m_notif_fetch_thread_params->is_running = true;
- m_notification_fetch_thread = std::thread(&DeviceBase::notification_fetch_thread, this, m_notif_fetch_thread_params);
-}
-
-// Fetch-thread body: blocks on read_notification() and pushes each event to the
-// queue until a read fails or is_running is cleared. A read failure during
-// shutdown (is_running already false) is expected and not logged as an error.
-void DeviceBase::notification_fetch_thread(std::shared_ptr<NotificationThreadSharedParams> params)
-{
- while (params->is_running) {
- auto expected_notification = this->read_notification();
- if (HAILO_SUCCESS != expected_notification.status()) {
- if (params->is_running) {
- LOGGER__ERROR("Read notification failed with status={}", expected_notification.status());
- }
- break;
- }
- params->write_queue->push(expected_notification.release());
- }
-}
-
-/**
- * Maps the device architecture to the firmware binary type used for header
- * validation (Hailo-8 family -> HAILO8 FW, Mercury family -> MERCURY FW).
- *
- * @return The firmware type, or HAILO_INVALID_DEVICE_ARCHITECTURE for
- *         architectures with no known firmware type.
- */
-Expected<firmware_type_t> DeviceBase::get_fw_type()
-{
- firmware_type_t firmware_type;
- const auto architecture = get_architecture();
- CHECK_EXPECTED(architecture);
-
- if ((architecture.value() == HAILO_ARCH_HAILO8) || (architecture.value() == HAILO_ARCH_HAILO8L)) {
- firmware_type = FIRMWARE_TYPE_HAILO8;
- }
- else if (architecture.value() == HAILO_ARCH_MERCURY_CA || architecture.value() == HAILO_ARCH_MERCURY_VPU) {
- firmware_type = FIRMWARE_TYPE_MERCURY;
- }
- else {
- // Fixed typo in the user-facing error message ("arcitecture" -> "architecture").
- LOGGER__ERROR("Invalid device architecture. {}", architecture.value());
- return make_unexpected(HAILO_INVALID_DEVICE_ARCHITECTURE);
- }
-
- return Expected<firmware_type_t>(firmware_type);
-}
-
-/**
- * Validates, uploads and (optionally) activates a combined APP+CORE firmware binary.
- *
- * Flow: MD5 the binary -> validate headers for this device's FW type -> identify the
- * board -> check APP/CORE header consistency and per-platform minimum versions ->
- * stream the binary in chunks -> validate on-device via MD5 -> finish, and if
- * should_reset, reset the device and re-identify to confirm the installed versions.
- *
- * Fixes vs. previous revision:
- *  - The post-install CORE FW version is now compared against new_core_fw_version
- *    (it was mistakenly compared against new_app_fw_version).
- *  - Removed a stale, duplicated CHECK of fw_header_status after reset() — the
- *    headers were already validated above; the copy-pasted re-check was dead code.
- *
- * @param firmware_binary  The complete firmware image to flash.
- * @param should_reset     When true, reset the device and verify the new versions.
- */
-hailo_status DeviceBase::firmware_update(const MemoryView &firmware_binary, bool should_reset)
-{
- HAILO_COMMON_STATUS_t fw_header_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- hailo_status status = HAILO_UNINITIALIZED;
- firmware_version_t *current_fw_version = NULL;
- firmware_version_t new_app_fw_version = {};
- firmware_version_t new_core_fw_version = {};
- uint32_t offset = 0;
- uint32_t chunk_size = 0;
- MD5_CTX md5_ctx = {};
- MD5_SUM_t md5_sum = {};
- firmware_header_t *new_app_firmware_header = NULL;
- firmware_header_t *new_core_firmware_header = NULL;
-
- // Digest the whole binary up-front; the device re-validates this MD5 after upload.
- MD5_Init(&md5_ctx);
- MD5_Update(&md5_ctx, firmware_binary.data(), firmware_binary.size());
- MD5_Final(md5_sum, &md5_ctx);
-
- const auto firmware_type = get_fw_type();
- CHECK_EXPECTED_AS_STATUS(firmware_type);
-
- fw_header_status = FIRMWARE_HEADER_UTILS__validate_fw_headers((uintptr_t) firmware_binary.data(), static_cast<uint32_t>(firmware_binary.size()), false,
- &new_app_firmware_header, &new_core_firmware_header, NULL, firmware_type.value());
- CHECK(HAILO_COMMON_STATUS__SUCCESS == fw_header_status, HAILO_INVALID_FIRMWARE,
- "FW update validation failed with status {}", fw_header_status);
-
- // TODO: Are we ok with doing another identify here?
- auto board_info_before_update_expected = Control::identify(*this);
- CHECK_EXPECTED_AS_STATUS(board_info_before_update_expected);
- hailo_device_identity_t board_info_before_update = board_info_before_update_expected.release();
-
- // On non-A0 devices the APP and CORE images must carry matching versions.
- if (board_info_before_update.device_architecture != HAILO_ARCH_HAILO8_A0) {
- if ((new_app_firmware_header->firmware_major != new_core_firmware_header->firmware_major) ||
- (new_app_firmware_header->firmware_minor != new_core_firmware_header->firmware_minor) ||
- (GET_REVISION_NUMBER_VALUE(new_app_firmware_header->firmware_revision) != GET_REVISION_NUMBER_VALUE(new_core_firmware_header->firmware_revision))) {
- LOGGER__ERROR("FW versions mismatch between APP and CORE firmwares.");
- return HAILO_INVALID_FIRMWARE;
- }
- }
-
- new_app_fw_version.firmware_major = new_app_firmware_header->firmware_major;
- new_app_fw_version.firmware_minor = new_app_firmware_header->firmware_minor;
- new_app_fw_version.firmware_revision = new_app_firmware_header->firmware_revision;
-
- new_core_fw_version.firmware_major = new_core_firmware_header->firmware_major;
- new_core_fw_version.firmware_minor = new_core_firmware_header->firmware_minor;
- new_core_fw_version.firmware_revision = new_core_firmware_header->firmware_revision;
-
- status = validate_fw_version_for_platform(board_info_before_update, new_app_fw_version, FW_BINARY_TYPE_APP_FIRMWARE);
- CHECK_SUCCESS(status, "Invalid APP firmware binary was supplied");
- status = validate_fw_version_for_platform(board_info_before_update, new_core_fw_version, FW_BINARY_TYPE_CORE_FIRMWARE);
- CHECK_SUCCESS(status, "Invalid CORE firmware binary was supplied");
-
- if (IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(new_app_firmware_header->firmware_revision) ||
- IS_REVISION_EXTENDED_CONTEXT_SWITCH_BUFFER(new_core_firmware_header->firmware_revision)) {
- LOGGER__ERROR("Can't update to \"extended context switch buffer\" firmware (no ethernet support).");
- return HAILO_INVALID_FIRMWARE;
- }
-
- // TODO: Fix cast, we are assuming they are the same (HRT-3177)
- current_fw_version = reinterpret_cast<firmware_version_t*>(&(board_info_before_update.fw_version));
-
- LOGGER__INFO("Current Version: {}.{}.{}{}. Updating to version: {}.{}.{}{}", current_fw_version->firmware_major,
- current_fw_version->firmware_minor, current_fw_version->firmware_revision,
- DEV_STRING_NOTE(board_info_before_update.is_release),
- new_app_fw_version.firmware_major, new_app_fw_version.firmware_minor,
- GET_REVISION_NUMBER_VALUE(new_app_fw_version.firmware_revision),
- DEV_STRING_NOTE((!IS_REVISION_DEV(new_app_fw_version.firmware_revision))));
-
-
- if (IS_REVISION_DEV(new_app_fw_version.firmware_revision)) {
- LOGGER__INFO("New firmware version is a develop version, and may be unstable!");
- }
-
- if (FIRMWARE_HEADER_UTILS__is_binary_being_downgraded(current_fw_version, &new_app_fw_version)) {
- LOGGER__INFO("Firmware is being downgraded.");
- }
-
- status = Control::start_firmware_update(*this);
- CHECK_SUCCESS(status);
- LOGGER__INFO("Update started.");
-
- // Stream the image to the device in WRITE_CHUNK_SIZE pieces.
- while (offset < firmware_binary.size()) {
- chunk_size = MIN(WRITE_CHUNK_SIZE, (static_cast<uint32_t>(firmware_binary.size()) - offset));
- LOGGER__DEBUG("Writing {} of data to offset {} / {}", chunk_size, offset, firmware_binary.size());
- status = Control::write_firmware_update(*this, offset, firmware_binary.data() + offset, chunk_size);
- CHECK_SUCCESS(status);
- offset += chunk_size;
- }
- LOGGER__INFO("Finished writing.");
-
- status = Control::validate_firmware_update(*this, &md5_sum, static_cast<uint32_t>(firmware_binary.size()));
- CHECK_SUCCESS(status);
-
- LOGGER__INFO("Firmware validation done.");
-
- status = Control::finish_firmware_update(*this);
- CHECK_SUCCESS(status);
- LOGGER__INFO("Firmware update finished.");
-
- if (should_reset) {
- LOGGER__INFO("Resetting...");
- status = reset(get_default_reset_mode());
- // A protocol-version change across the update makes the reset report
- // HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION - that is still a success here.
- CHECK((status == HAILO_SUCCESS) || (status == HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION), status);
-
- auto board_info_after_install_expected = Control::identify(*this);
- if (board_info_after_install_expected.status() == HAILO_UNSUPPORTED_CONTROL_PROTOCOL_VERSION) {
- LOGGER__INFO("Successfully updated firmware. Protocol version has changed so firmware cannot be specified");
- return HAILO_SUCCESS;
- }
-
- CHECK_EXPECTED_AS_STATUS(board_info_after_install_expected);
- hailo_device_identity_t board_info_after_install = board_info_after_install_expected.release();
-
- LOGGER__INFO("New App FW version: {}.{}.{}{}", board_info_after_install.fw_version.major, board_info_after_install.fw_version.minor,
- board_info_after_install.fw_version.revision, DEV_STRING_NOTE(board_info_after_install.is_release));
-
- // Validating that the new fw version is as expected
- if ((board_info_after_install.fw_version.major != new_app_fw_version.firmware_major) ||
- (board_info_after_install.fw_version.minor != new_app_fw_version.firmware_minor) ||
- (GET_REVISION_NUMBER_VALUE(board_info_after_install.fw_version.revision) != GET_REVISION_NUMBER_VALUE(new_app_fw_version.firmware_revision))) {
- LOGGER__WARNING("New App FW version is different than expected!");
- }
-
- if (board_info_after_install.device_architecture != HAILO_ARCH_HAILO8_A0) {
- hailo_core_information_t core_info_after_install{};
- status = Control::core_identify(*this, &core_info_after_install);
- CHECK_SUCCESS(status);
- LOGGER__INFO("New Core FW version: {}.{}.{}{}", core_info_after_install.fw_version.major, core_info_after_install.fw_version.minor,
- core_info_after_install.fw_version.revision, DEV_STRING_NOTE(core_info_after_install.is_release));
- // Compare the CORE FW against the CORE version from the binary (was wrongly
- // compared against the APP version).
- if ((core_info_after_install.fw_version.major != new_core_fw_version.firmware_major) ||
- (core_info_after_install.fw_version.minor != new_core_fw_version.firmware_minor) ||
- (GET_REVISION_NUMBER_VALUE(core_info_after_install.fw_version.revision) != GET_REVISION_NUMBER_VALUE(new_core_fw_version.firmware_revision))) {
- LOGGER__WARNING("New Core FW version is different than expected!");
- }
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-// Validates and flashes a second-stage boot binary: MD5 digest -> header validation
-// for this device's FW type -> minimum-version check ({1,1,0}) -> chunked upload to
-// internal memory -> copy-to-flash with on-device MD5 verification.
-// The copy-to-flash step is the critical window - a failure there may brick the
-// device, hence the CRITICAL log advising not to power off.
-hailo_status DeviceBase::second_stage_update(uint8_t* second_stage_binary, uint32_t second_stage_binary_length)
-{
- HAILO_COMMON_STATUS_t second_stage_header_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- hailo_status status = HAILO_UNINITIALIZED;
- firmware_version_t new_second_stage_version = {};
- firmware_version_t minimum_second_stage_version = {1, 1, 0};
- uint32_t offset = 0;
- uint32_t chunk_size = 0;
- MD5_CTX md5_ctx = {};
- MD5_SUM_t md5_sum = {};
- firmware_header_t *new_second_stage_header = NULL;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(second_stage_binary);
-
- MD5_Init(&md5_ctx);
- MD5_Update(&md5_ctx, second_stage_binary, second_stage_binary_length);
- MD5_Final(md5_sum, &md5_ctx);
-
- const auto firmware_type = get_fw_type();
- CHECK_EXPECTED_AS_STATUS(firmware_type);
-
- second_stage_header_status = FIRMWARE_HEADER_UTILS__validate_second_stage_headers((uintptr_t) second_stage_binary,
- second_stage_binary_length, &new_second_stage_header, firmware_type.value());
- CHECK(HAILO_COMMON_STATUS__SUCCESS == second_stage_header_status, HAILO_INVALID_SECOND_STAGE,
- "Second stage update validation failed with status {}", second_stage_header_status);
-
- new_second_stage_version.firmware_major = new_second_stage_header->firmware_major;
- new_second_stage_version.firmware_minor = new_second_stage_header->firmware_minor;
- new_second_stage_version.firmware_revision = new_second_stage_header->firmware_revision;
-
- status = validate_binary_version_for_platform(&new_second_stage_version,
- &minimum_second_stage_version,
- FW_BINARY_TYPE_SECOND_STAGE_BOOT);
- CHECK_SUCCESS(status);
-
- LOGGER__INFO("Updating to version: {}.{}.{}",
- new_second_stage_version.firmware_major, new_second_stage_version.firmware_minor,
- GET_REVISION_NUMBER_VALUE(new_second_stage_version.firmware_revision));
-
- LOGGER__INFO("Writing second stage to internal memory");
- while (offset < second_stage_binary_length) {
- chunk_size = MIN(WRITE_CHUNK_SIZE, (second_stage_binary_length - offset));
- LOGGER__INFO("Writing {} of data to offset {} / {}", chunk_size, offset, second_stage_binary_length);
- status = Control::write_second_stage_to_internal_memory(*this, offset, second_stage_binary + offset, chunk_size);
- CHECK_SUCCESS(status);
- offset += chunk_size;
- }
- status = Control::copy_second_stage_to_flash(*this, &md5_sum, second_stage_binary_length);
- if (HAILO_SUCCESS != status) {
- LOGGER__CRITICAL("Second stage failed in a critical stage, Please contact Hailo support and DO NOT power off the device");
- }
- CHECK_SUCCESS(status);
-
- LOGGER__INFO("Finished copying second stage to flash.");
-
- return HAILO_SUCCESS;
-}
-
-// Reads a sensor config CSV file and streams it to a non-ISP device section.
-// ISP configs must go through store_isp_config (rejected here explicitly).
-hailo_status DeviceBase::store_sensor_config(uint32_t section_index, hailo_sensor_types_t sensor_type,
- uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
- const std::string &config_file_path, const std::string &config_name)
-{
- CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
- "Cannot store sensor config in invalid section {}. Please choose section index (0-{}).", section_index, MAX_NON_ISP_SECTIONS);
- CHECK(sensor_type != HAILO_SENSOR_TYPES_HAILO8_ISP, HAILO_INVALID_ARGUMENT,
- "store_sensor_config intended only for sensor config, for ISP config use store_isp");
-
- auto control_buffers = SensorConfigUtils::read_config_file(config_file_path);
- CHECK_EXPECTED_AS_STATUS(control_buffers, "Failed reading config file");
-
- return store_sensor_control_buffers(control_buffers.value(), section_index, sensor_type,
- reset_config_size, config_height, config_width, config_fps, config_name);
-}
-
-// Reads the static + runtime ISP config files and streams them to the dedicated
-// ISP section (SENSOR_CONFIG__ISP_SECTION_INDEX).
-hailo_status DeviceBase::store_isp_config(uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
- const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path, const std::string &config_name)
-{
- auto control_buffers = SensorConfigUtils::read_isp_config_file(isp_static_config_file_path, isp_runtime_config_file_path);
- CHECK_EXPECTED_AS_STATUS(control_buffers, "Failed reading ISP config file");
-
- return store_sensor_control_buffers(control_buffers.value(), SENSOR_CONFIG__ISP_SECTION_INDEX, HAILO_SENSOR_TYPES_HAILO8_ISP,
- reset_config_size, config_height, config_width, config_fps, config_name);
-
-}
-
-// Fetches the raw sensor sections-info table from the device into a fresh buffer
-// of SENSOR_SECTIONS_INFO_SIZE bytes.
-Expected<Buffer> DeviceBase::sensor_get_sections_info()
-{
- auto buffer = Buffer::create(SENSOR_SECTIONS_INFO_SIZE);
- CHECK_EXPECTED(buffer);
-
- hailo_status status = Control::sensor_get_sections_info(*this, buffer->data());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return buffer;
-}
-
-// Dumps a stored sensor-config section to a CSV file: validates the section is
-// active and its size is a whole number of operation entries, reads the config
-// back in MAX_CONFIG_ENTRIES_DATA_SIZE chunks (plus a final partial chunk), then
-// writes it out via SensorConfigUtils::dump_config_to_csv.
-hailo_status DeviceBase::sensor_dump_config(uint32_t section_index, const std::string &config_file_path)
-{
- CHECK(SENSOR_CONFIG__TOTAL_SECTIONS_BLOCK_COUNT > section_index, HAILO_INVALID_ARGUMENT, "Section {} is invalid. Section index must be in the range [0 - {}]", section_index, (SENSOR_CONFIG__TOTAL_SECTIONS_BLOCK_COUNT - 1));
- auto sections_info_buffer = sensor_get_sections_info();
- CHECK_EXPECTED_AS_STATUS(sections_info_buffer);
-
- SENSOR_CONFIG__section_info_t *section_info_ptr = &((SENSOR_CONFIG__section_info_t *)sections_info_buffer->data())[section_index];
- CHECK(section_info_ptr->is_free == 0, HAILO_NOT_FOUND, "Section {} is not active", section_index);
- CHECK(0 == (section_info_ptr->config_size % sizeof(SENSOR_CONFIG__operation_cfg_t)), HAILO_INVALID_OPERATION, "Section config size is invalid.");
-
- /* Read config data from device */
- auto operation_cfg = Buffer::create(section_info_ptr->config_size);
- CHECK_EXPECTED_AS_STATUS(operation_cfg);
-
- size_t read_full_buffer_count = (section_info_ptr->config_size / MAX_CONFIG_ENTRIES_DATA_SIZE);
- uint32_t residue_to_read = static_cast<uint32_t>(section_info_ptr->config_size - (read_full_buffer_count * MAX_CONFIG_ENTRIES_DATA_SIZE));
- uint32_t entries_count = (section_info_ptr->config_size / static_cast<uint32_t>(sizeof(SENSOR_CONFIG__operation_cfg_t)));
- uint32_t offset = 0;
-
- hailo_status status = HAILO_UNINITIALIZED;
- for (uint32_t i = 0; i < read_full_buffer_count; i++) {
- status = Control::sensor_get_config(*this, section_index, offset, (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE, (operation_cfg->data() + offset));
- CHECK_SUCCESS(status);
- offset += static_cast<uint32_t>(MAX_CONFIG_ENTRIES_DATA_SIZE);
- }
- if (0 < residue_to_read) {
- status = Control::sensor_get_config(*this, section_index, offset, residue_to_read, (operation_cfg->data() + offset));
- CHECK_SUCCESS(status);
- }
-
- status = SensorConfigUtils::dump_config_to_csv((SENSOR_CONFIG__operation_cfg_t*)operation_cfg->data(), config_file_path, entries_count);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-// Thin wrapper: forwards the I2C bus-index assignment to the control layer.
-hailo_status DeviceBase::sensor_set_i2c_bus_index(hailo_sensor_types_t sensor_type, uint32_t bus_index)
-{
- return Control::sensor_set_i2c_bus_index(*this, sensor_type, bus_index);
-}
-
-// Validates the section index is a non-ISP section, then asks the firmware to
-// load and start the stored sensor configuration.
-hailo_status DeviceBase::sensor_load_and_start_config(uint32_t section_index)
-{
- CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
- "Cannot load config from invalid section index {}. Please choose section index (0-{}).",
- section_index, MAX_NON_ISP_SECTIONS);
- return Control::sensor_load_and_start_config(*this, section_index);
-}
-
-// Validates the section index is a non-ISP section, then resets the sensor
-// associated with it via the control layer.
-hailo_status DeviceBase::sensor_reset(uint32_t section_index)
-{
- CHECK((section_index <= MAX_NON_ISP_SECTIONS), HAILO_INVALID_ARGUMENT,
- "Cannot reset sensor in invalid section index {}. Please choose section index (0-{}).",
- section_index, MAX_NON_ISP_SECTIONS);
- return Control::sensor_reset(*this, section_index);
-}
-
-// Thin wrapper: configures a generic I2C slave on the device via the control layer.
-hailo_status DeviceBase::sensor_set_generic_i2c_slave(uint16_t slave_address, uint8_t offset_size, uint8_t bus_index,
- uint8_t should_hold_bus, uint8_t slave_endianness)
-{
- return Control::sensor_set_generic_i2c_slave(*this, slave_address, offset_size, bus_index, should_hold_bus, slave_endianness);
-}
-
-// Reads the board config blob from the device into a zero-initialized buffer
-// of BOARD_CONFIG_SIZE bytes.
-Expected<Buffer> DeviceBase::read_board_config()
-{
- auto result = Buffer::create(BOARD_CONFIG_SIZE, 0);
- CHECK_EXPECTED(result);
-
- auto status = Control::read_board_config(*this, result->data(), static_cast<uint32_t>(result->size()));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return result.release();
-}
-
-// Thin wrapper: writes the supplied board-config blob to the device.
-hailo_status DeviceBase::write_board_config(const MemoryView &buffer)
-{
- return Control::write_board_config(*this, buffer.data(), static_cast<uint32_t>(buffer.size()));
-}
-
-// Queries user-config metadata (e.g. its total size) from the firmware.
-Expected<hailo_fw_user_config_information_t> DeviceBase::examine_user_config()
-{
- hailo_fw_user_config_information_t result{};
- auto status = Control::examine_user_config(*this, &result);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return result;
-}
-
-// Reads the whole user config: first examines it to learn total_size, then
-// allocates a zero-initialized buffer of that size and fills it from the device.
-Expected<Buffer> DeviceBase::read_user_config()
-{
- auto user_config_info = examine_user_config();
- CHECK_EXPECTED(user_config_info, "Failed to examine user config");
-
- auto result = Buffer::create(user_config_info->total_size, 0);
- CHECK_EXPECTED(result);
-
- auto status = Control::read_user_config(*this, result->data(), static_cast<uint32_t>(result->size()));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return result.release();
-}
-
-// Thin wrapper: writes the supplied user-config blob to the device.
-hailo_status DeviceBase::write_user_config(const MemoryView &buffer)
-{
- return Control::write_user_config(*this, buffer.data(), static_cast<uint32_t>(buffer.size()));
-}
-
-// Thin wrapper: erases the user config on the device.
-hailo_status DeviceBase::erase_user_config()
-{
- return Control::erase_user_config(*this);
-}
-
-// Launches the dispatch thread that pops queued D2H events and invokes callbacks.
-// device_id is captured by value so the lambda owns its own copy.
-void DeviceBase::start_d2h_notification_thread(const std::string &device_id)
-{
- m_d2h_notification_thread = std::thread([this, device_id]() { this->d2h_notification_thread_main(device_id); });
-}
-
-// Stops the dispatch thread by draining the queue and pushing a sentinel event
-// (TERMINATE_EVENT_ID); joins only if the thread was actually started.
-void DeviceBase::stop_d2h_notification_thread()
-{
- static const D2H_EVENT_MESSAGE_t TERMINATE {{0, 0, 0, 0, TERMINATE_EVENT_ID, 0, 0}, {}};
- m_d2h_notification_queue.clear();
- if (m_d2h_notification_thread.joinable()) {
- m_d2h_notification_queue.push(TERMINATE);
- m_d2h_notification_thread.join();
- }
-}
-
-// Dispatch-thread body: blocks on the D2H queue, exits on the TERMINATE sentinel,
-// parses each firmware event, maps its id to the public notification id, and
-// invokes the registered user callback (if any) outside the callbacks lock.
-void DeviceBase::d2h_notification_thread_main(const std::string &device_id)
-{
- while (true) {
- auto notification = m_d2h_notification_queue.pop();
- if (notification.header.event_id == TERMINATE_EVENT_ID) {
- LOGGER__DEBUG("[{}] D2H notification thread got terminate signal, returning..", device_id);
- return;
- }
- /* Parse and print the Event info */
- auto d2h_status = D2H_EVENTS__parse_event(&notification);
- if (HAILO_COMMON_STATUS__SUCCESS != d2h_status) {
- LOGGER__ERROR("[{}] Fail to Parse firmware notification {} status is {}", device_id, notification.header.event_id, d2h_status);
- continue;
- }
-
- hailo_notification_t callback_notification;
- uint32_t notification_fw_id = notification.header.event_id;
- hailo_notification_id_t hailo_notification_id;
- hailo_status status = fw_notification_id_to_hailo((D2H_EVENT_ID_t)notification_fw_id, &hailo_notification_id);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("[{}] Got invalid notification id from fw: {}", device_id, notification_fw_id);
- continue;
- }
-
- LOGGER__INFO("[{}] Got notification from fw with id: {}", device_id, hailo_notification_id);
-
- std::shared_ptr<NotificationCallback> callback_func = nullptr;
- void *callback_opaque = nullptr;
- {
- const std::lock_guard<std::mutex> lock(m_callbacks_lock);
- callback_func = m_d2h_callbacks[hailo_notification_id].func;
- callback_opaque = m_d2h_callbacks[hailo_notification_id].opaque;
- // m_callbacks_lock is released before invoking the callback: the user callback
- // may call back into APIs that acquire this lock, which would deadlock.
- // A recursive_mutex was deliberately avoided because of its overhead.
- }
-
- if (nullptr != callback_func) {
- callback_notification.id = hailo_notification_id;
- callback_notification.sequence = notification.header.sequence;
- static_assert(sizeof(callback_notification.body) == sizeof(notification.message_parameters), "D2H notification size mismatch");
- memcpy(&callback_notification.body, &notification.message_parameters, sizeof(notification.message_parameters));
- (*callback_func)(*this, callback_notification, callback_opaque);
- }
- }
-}
-
-hailo_status DeviceBase::check_hef_is_compatible(Hef &hef)
-{
- const auto device_arch = get_architecture();
- CHECK_EXPECTED_AS_STATUS(device_arch, "Can't get device architecture (is the FW loaded?)");
-
- if (!is_hef_compatible(device_arch.value(), hef.pimpl->get_device_arch())) {
- // TODO: print here the actual device_arch as a string
- LOGGER__ERROR("HEF format is not compatible with device. Device arch: {}, HEF arch: {}",
- device_arch.value(), hef.pimpl->get_device_arch());
- return HAILO_INVALID_HEF;
- }
-
- // TODO: MSW-227 check clock rate for mercury as well.
- if ((HAILO_ARCH_HAILO8 == device_arch.value()) || (HAILO_ARCH_HAILO8L == device_arch.value())) {
- auto extended_device_info_expected = Control::get_extended_device_information(*this);
- CHECK_EXPECTED_AS_STATUS(extended_device_info_expected, "Can't get device extended info");
- hailo_extended_device_information_t extended_device_information = extended_device_info_expected.release();
- check_clock_rate_for_hailo8(extended_device_information.neural_network_core_clock_rate,
- hef.pimpl->get_device_arch());
- }
-
- if ((ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == hef.pimpl->get_device_arch()) && (HAILO_ARCH_HAILO8 == device_arch.value())) {
- LOGGER__WARNING(
- "HEF was compiled for Hailo8L device, while the device itself is Hailo8. " \
- "This will result in lower performance.");
- }
-
-
- return HAILO_SUCCESS;
-}
-
-hailo_status DeviceBase::fw_notification_id_to_hailo(D2H_EVENT_ID_t fw_notification_id,
- hailo_notification_id_t* hailo_notification_id)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- switch (fw_notification_id) {
- case ETHERNET_SERVICE_RX_ERROR_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_ETHERNET_RX_ERROR;
- break;
- case D2H_HOST_INFO_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_DEBUG;
- break;
- case HEALTH_MONITOR_TEMPERATURE_ALARM_D2H_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_TEMPERATURE_ALARM;
- break;
- case HEALTH_MONITOR_CLOSED_STREAMS_D2H_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_DATAFLOW_SHUTDOWN;
- break;
- case HEALTH_MONITOR_OVERCURRENT_PROTECTION_ALERT_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_OVERCURRENT_ALARM;
- break;
- case HEALTH_MONITOR_LCU_ECC_CORRECTABLE_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_LCU_ECC_CORRECTABLE_ERROR;
- break;
- case HEALTH_MONITOR_LCU_ECC_UNCORRECTABLE_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_LCU_ECC_UNCORRECTABLE_ERROR;
- break;
- case HEALTH_MONITOR_CPU_ECC_ERROR_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_CPU_ECC_ERROR;
- break;
- case HEALTH_MONITOR_CPU_ECC_FATAL_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_CPU_ECC_FATAL;
- break;
- case CONTEXT_SWITCH_BREAKPOINT_REACHED:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_CONTEXT_SWITCH_BREAKPOINT_REACHED;
- break;
- case HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID:
- *hailo_notification_id = HAILO_NOTIFICATION_ID_HEALTH_MONITOR_CLOCK_CHANGED_EVENT;
- break;
- default:
- status = HAILO_INVALID_ARGUMENT;
- goto l_exit;
- }
-
- status = HAILO_SUCCESS;
-l_exit:
- return status;
-}
-
-hailo_status DeviceBase::validate_binary_version_for_platform(firmware_version_t *new_binary_version,
- firmware_version_t *min_supported_binary_version, FW_BINARY_TYPE_t fw_binary_type)
-{
- HAILO_COMMON_STATUS_t binary_status = FIRMWARE_HEADER_UTILS__validate_binary_version(new_binary_version, min_supported_binary_version,
- fw_binary_type);
- CHECK(HAILO_COMMON_STATUS__SUCCESS == binary_status, HAILO_INVALID_FIRMWARE,
- "FW binary version validation failed with status {}", binary_status);
- return HAILO_SUCCESS;
-}
-
-hailo_status DeviceBase::validate_fw_version_for_platform(const hailo_device_identity_t &board_info, firmware_version_t fw_version, FW_BINARY_TYPE_t fw_binary_type)
-{
- firmware_version_t min_supported_fw_version = {0, 0, 0};
- const firmware_version_t evb_mdot2_min_version = {2, 1, 0};
- const firmware_version_t mpcie_min_version = {2, 2, 0};
-
- if (0 == strncmp(EVB_PART_NUMBER_PREFIX, board_info.part_number, PART_NUMBER_PREFIX_LENGTH) ||
- 0 == strncmp(MDOT2_PART_NUMBER_PREFIX, board_info.part_number, PART_NUMBER_PREFIX_LENGTH)) {
- min_supported_fw_version = evb_mdot2_min_version;
- }
-
- else if (0 == strncmp(MPCIE_PART_NUMBER_PREFIX, board_info.part_number, PART_NUMBER_PREFIX_LENGTH)) {
- min_supported_fw_version = mpcie_min_version;
- }
- else {
- min_supported_fw_version = evb_mdot2_min_version;
- }
-
- return validate_binary_version_for_platform(&fw_version, &min_supported_fw_version, fw_binary_type);
-}
-
-bool DeviceBase::is_hef_compatible(hailo_device_architecture_t device_arch, ProtoHEFHwArch hef_arch)
-{
- switch (device_arch) {
- case HAILO_ARCH_HAILO8:
- return (hef_arch == PROTO__HW_ARCH__HAILO8P) || (hef_arch == PROTO__HW_ARCH__HAILO8R) || (hef_arch == PROTO__HW_ARCH__HAILO8L);
- case HAILO_ARCH_HAILO8L:
- return (hef_arch == PROTO__HW_ARCH__HAILO8L);
- case HAILO_ARCH_MERCURY_CA:
- case HAILO_ARCH_MERCURY_VPU:
- return (hef_arch == PROTO__HW_ARCH__MERCURY) || (hef_arch == PROTO__HW_ARCH__GINGER) ||
- (hef_arch == PROTO__HW_ARCH__LAVENDER);
- default:
- return false;
- }
-}
-
-void DeviceBase::check_clock_rate_for_hailo8(uint32_t clock_rate, ProtoHEFHwArch hef_hw_arch)
-{
- uint32_t expected_clock_rate = (hef_hw_arch == ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8R) ? HAILO8R_CLOCK_RATE : HAILO8_CLOCK_RATE;
- if (expected_clock_rate != clock_rate) {
- LOGGER__WARNING(
- "HEF was compiled assuming clock rate of {} MHz, while the device clock rate is {} MHz. " \
- "FPS calculations might not be accurate.",
- (expected_clock_rate / CLOCKS_IN_MHZ),
- (clock_rate / CLOCKS_IN_MHZ));
- }
-}
-
-hailo_status DeviceBase::store_sensor_control_buffers(const std::vector<SENSOR_CONFIG__operation_cfg_t> &control_buffers, uint32_t section_index, hailo_sensor_types_t sensor_type,
- uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps, const std::string &config_name)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- uint32_t total_data_size = static_cast<uint32_t>(control_buffers.size() * sizeof(control_buffers[0]));
- size_t config_info_full_buffer = control_buffers.size() / MAX_CONFIG_INFO_ENTRIES;
- uint32_t is_first = 1;
- uint32_t offset = 0;
-
- for(uint32_t i = 0; i < config_info_full_buffer; i++) {
- status = Control::sensor_store_config(*this, is_first, section_index, offset, reset_config_size, sensor_type, total_data_size,
- (uint8_t*)control_buffers.data() + offset, (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE,
- config_height, config_width, config_fps, static_cast<uint32_t>(config_name.size()), (uint8_t*)config_name.c_str());
- CHECK_SUCCESS(status, "Failed to store sensor config");
-
- offset += (uint32_t)MAX_CONFIG_ENTRIES_DATA_SIZE;
- is_first = 0;
- }
-
- if (offset < total_data_size) {
- status = Control::sensor_store_config(*this, is_first, section_index, offset, reset_config_size, sensor_type, total_data_size,
- (uint8_t*)control_buffers.data() + offset, total_data_size - offset,
- config_height, config_width, config_fps, static_cast<uint32_t>(config_name.size()), (uint8_t*)config_name.c_str());
- CHECK_SUCCESS(status,"Failed to store sensor config");
- }
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file device_internal.hpp
- * @brief Class declaration for DeviceBase that implements the basic Device "interface" (not technically
- * an interface, but good enough). All internal devices should inherit from the DeviceBase class.
- * Hence, the hierarchy is as follows:
- *
- * Device (External "interface")
- * |-- BaseDevice (Base classes)
- * |-- VdmaDevice
- * | |-- PcieDevice
- * | |-- CoreDevice
- * |-- EthernetDevice
- **/
-
-#ifndef _HAILO_DEVICE_INTERNAL_HPP_
-#define _HAILO_DEVICE_INTERNAL_HPP_
-
-#include "hailo/device.hpp"
-#include "hailo/hailort.h"
-#include "d2h_event_queue.hpp"
-#include "hef_internal.hpp"
-#include "firmware_header.h"
-#include "firmware_header_utils.h"
-#include "control_protocol.h"
-#include "hef_internal.hpp"
-
-#include <thread>
-
-namespace hailort
-{
-
-#define EVB_PART_NUMBER_PREFIX ("HEV18B1C4GA")
-#define MDOT2_PART_NUMBER_PREFIX ("HM218B1C2FA")
-#define MPCIE_PART_NUMBER_PREFIX ("HMP1RB1C2GA")
-
-// Will be used to perfrom generic validation for all variations of a specific module
-#define PART_NUMBER_PREFIX_LENGTH (11)
-
-#define CLOCKS_IN_MHZ (1000 * 1000)
-
-class DeviceBase : public Device
-{
-public:
- DeviceBase(Type type);
- DeviceBase(DeviceBase &&) = delete;
- DeviceBase(const DeviceBase &) = delete;
- DeviceBase &operator=(DeviceBase &&) = delete;
- DeviceBase &operator=(const DeviceBase &) = delete;
- virtual ~DeviceBase();
-
- virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params={}) override;
- virtual hailo_status reset(hailo_reset_device_mode_t mode) override;
- virtual hailo_status set_notification_callback(const NotificationCallback &func, hailo_notification_id_t notification_id, void *opaque) override;
- virtual hailo_status remove_notification_callback(hailo_notification_id_t notification_id) override;
- virtual void activate_notifications(const std::string &device_id);
- virtual void start_notification_fetch_thread(D2hEventQueue *write_queue);
- virtual hailo_status stop_notification_fetch_thread();
- virtual hailo_status firmware_update(const MemoryView &firmware_binary, bool should_reset) override;
- virtual hailo_status second_stage_update(uint8_t *second_stage_binary, uint32_t second_stage_binary_length) override;
- virtual hailo_status store_sensor_config(uint32_t section_index, hailo_sensor_types_t sensor_type,
- uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
- const std::string &config_file_path, const std::string &config_name) override;
- virtual hailo_status store_isp_config(uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps,
- const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path, const std::string &config_name) override;
- virtual Expected<Buffer> sensor_get_sections_info() override;
- virtual hailo_status sensor_dump_config(uint32_t section_index, const std::string &config_file_path) override;
- virtual hailo_status sensor_set_i2c_bus_index(hailo_sensor_types_t sensor_type, uint32_t bus_index) override;
- virtual hailo_status sensor_load_and_start_config(uint32_t section_index) override;
- virtual hailo_status sensor_reset(uint32_t section_index) override;
- virtual hailo_status sensor_set_generic_i2c_slave(uint16_t slave_address, uint8_t offset_size, uint8_t bus_index,
- uint8_t should_hold_bus, uint8_t slave_endianness) override;
- virtual Expected<Buffer> read_board_config() override;
- virtual hailo_status write_board_config(const MemoryView &buffer) override;
- virtual Expected<hailo_fw_user_config_information_t> examine_user_config() override;
- virtual Expected<Buffer> read_user_config() override;
- virtual hailo_status write_user_config(const MemoryView &buffer) override;
- virtual hailo_status erase_user_config() override;
-
-protected:
- struct NotificationThreadSharedParams {
- NotificationThreadSharedParams() : is_running(false) {}
- D2hEventQueue *write_queue;
- bool is_running;
- };
-
- // Special value to signal the d2h notification thread to terminate
- static const uint32_t TERMINATE_EVENT_ID = std::numeric_limits<uint32_t>::max();
-
- virtual hailo_reset_device_mode_t get_default_reset_mode() = 0;
- virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) = 0;
- virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() = 0;
- virtual hailo_status disable_notifications() = 0;
- void start_d2h_notification_thread(const std::string &device_id);
- void stop_d2h_notification_thread();
- void d2h_notification_thread_main(const std::string &device_id);
- hailo_status check_hef_is_compatible(Hef &hef);
-
- virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) = 0;
-
- D2hEventQueue m_d2h_notification_queue;
- std::thread m_d2h_notification_thread;
- std::thread m_notification_fetch_thread;
- std::shared_ptr<NotificationThreadSharedParams> m_notif_fetch_thread_params;
-
-private:
- friend class VDeviceBase;
-
- static hailo_status fw_notification_id_to_hailo(D2H_EVENT_ID_t fw_notification_id,
- hailo_notification_id_t* hailo_notification_id);
- static hailo_status validate_binary_version_for_platform(firmware_version_t *new_binary_version,
- firmware_version_t *min_supported_binary_version, FW_BINARY_TYPE_t fw_binary_type);
- static hailo_status validate_fw_version_for_platform(const hailo_device_identity_t &board_info,
- firmware_version_t fw_version, FW_BINARY_TYPE_t fw_binary_type);
- static bool is_hef_compatible(hailo_device_architecture_t device_arch, ProtoHEFHwArch hw_arch);
- static void check_clock_rate_for_hailo8(uint32_t clock_rate, ProtoHEFHwArch hef_hw_arch);
- hailo_status store_sensor_control_buffers(const std::vector<SENSOR_CONFIG__operation_cfg_t> &control_buffers, uint32_t section_index, hailo_sensor_types_t sensor_type,
- uint32_t reset_config_size, uint16_t config_height, uint16_t config_width, uint16_t config_fps, const std::string &config_name);
- virtual void notification_fetch_thread(std::shared_ptr<NotificationThreadSharedParams> params);
- Expected<firmware_type_t> get_fw_type();
-
- typedef struct {
- std::shared_ptr<NotificationCallback> func;
- void *opaque;
- } d2h_notification_callback_t;
-
- d2h_notification_callback_t m_d2h_callbacks[HAILO_NOTIFICATION_ID_COUNT];
- std::mutex m_callbacks_lock;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_DEVICE_INTERNAL_HPP_ */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/eth_device.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/eth_stream.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/hcp_config_core_op.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/hcp_config_activated_core_op.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/network_rate_calculator.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file eth_device.cpp
+ * @brief TODO: brief
+ *
+ * TODO: doc
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+#include "hailo/hef.hpp"
+
+#include "common/utils.hpp"
+#include "common/ethernet_utils.hpp"
+
+#include "eth/eth_device.hpp"
+#include "eth/udp.hpp"
+#include "device_common/control.hpp"
+#include "network_group/network_group_internal.hpp"
+
+#include <stdlib.h>
+#include <errno.h>
+#include <new>
+#include <array>
+
+
+namespace hailort
+{
+
+#define SCAN_SEQUENCE (0)
+#define WAIT_FOR_DEVICE_WAKEUP_MAX_ATTEMPTS (10)
+#define WAIT_FOR_DEVICE_WAKEUP_TIMEOUT (1000)
+#define ETH_BROADCAST_IP ("255.255.255.255")
+
+
+hailo_status EthernetDevice::fw_interact_impl(uint8_t *request_buffer, size_t request_size,
+ uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id)
+{
+ /* CPU id is used only in PCIe, for Eth all control goes to APP CPU.*/
+ (void)cpu_id;
+ return m_control_udp.fw_interact(request_buffer, request_size, response_buffer, response_size, m_control_sequence);
+}
+
+hailo_status EthernetDevice::wait_for_wakeup()
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+
+ /* Create udp socket */
+ auto udp = Udp::create(m_device_info.device_address.sin_addr, m_device_info.device_address.sin_port,
+ m_device_info.host_address.sin_addr, m_device_info.host_address.sin_port);
+ CHECK_EXPECTED_AS_STATUS(udp);
+
+ status = udp->set_timeout(std::chrono::milliseconds(WAIT_FOR_DEVICE_WAKEUP_TIMEOUT));
+ CHECK_SUCCESS(status);
+
+ status = udp->set_max_number_of_attempts(WAIT_FOR_DEVICE_WAKEUP_MAX_ATTEMPTS);
+ CHECK_SUCCESS(status);
+
+ /* Create and send identify-control until it runs successfully */
+ common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, m_control_sequence);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ status = udp->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size,
+ m_control_sequence);
+
+ // Always increment sequence
+ m_control_sequence = (m_control_sequence + 1) % CONTROL__MAX_SEQUENCE;
+ CHECK_SUCCESS(status);
+
+ /* Parse and validate the response */
+ return Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
+}
+
+Expected<std::unique_ptr<EthernetDevice>> EthernetDevice::create(const hailo_eth_device_info_t &device_info)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ // Creates control socket
+ auto udp = Udp::create(device_info.device_address.sin_addr, device_info.device_address.sin_port,
+ device_info.host_address.sin_addr, device_info.host_address.sin_port);
+ CHECK_EXPECTED(udp, "Failed to init control socket.");
+
+ auto device = std::unique_ptr<EthernetDevice>(new (std::nothrow) EthernetDevice(device_info, udp.release(), status));
+ CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
+
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed creating EthernetDevice");
+ return make_unexpected(status);
+ }
+
+ return device;
+}
+
+Expected<std::unique_ptr<EthernetDevice>> EthernetDevice::create(const std::string &ip_addr)
+{
+ const bool LOG_ON_FAILURE = true;
+ auto device_info = parse_eth_device_info(ip_addr, LOG_ON_FAILURE);
+ CHECK_EXPECTED(device_info, "Failed to parse ip address {}", ip_addr);
+ return create(device_info.release());
+}
+
+EthernetDevice::EthernetDevice(const hailo_eth_device_info_t &device_info, Udp &&control_udp, hailo_status &status) :
+ DeviceBase::DeviceBase(Device::Type::ETH),
+ m_device_info(device_info),
+ m_control_udp(std::move(control_udp))
+{
+ char ip_buffer[INET_ADDRSTRLEN];
+ status = Socket::ntop(AF_INET, &(device_info.device_address.sin_addr), ip_buffer, INET_ADDRSTRLEN);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Socket::ntop() failed with status {}", status);
+ return;
+ }
+ m_device_id = std::string(ip_buffer);
+
+ status = m_control_udp.set_timeout(std::chrono::milliseconds(m_device_info.timeout_millis));
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to init set timeout for control socket.");
+ return;
+ }
+
+ status = m_control_udp.set_max_number_of_attempts(m_device_info.max_number_of_attempts);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to init set max_number_of_attempts for control socket.");
+ return;
+ }
+
+ status = update_fw_state();
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("update_fw_state() failed with status {}", status);
+ return;
+ }
+
+ status = HAILO_SUCCESS;
+}
+
+Expected<size_t> EthernetDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
+{
+ (void) buffer;
+ (void) cpu_id;
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+static void eth_device__fill_eth_device_info(Udp &udp, hailo_eth_device_info_t *eth_device_info)
+{
+ eth_device_info->device_address.sin_family = AF_INET;
+ eth_device_info->device_address.sin_addr = udp.m_device_address.sin_addr;
+ eth_device_info->device_address.sin_port = HAILO_DEFAULT_ETH_CONTROL_PORT;
+
+ eth_device_info->host_address.sin_family = AF_INET;
+ eth_device_info->host_address.sin_addr.s_addr = INADDR_ANY;
+ eth_device_info->host_address.sin_port = HAILO_ETH_PORT_ANY;
+
+ eth_device_info->max_number_of_attempts = HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES;
+ eth_device_info->max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
+ eth_device_info->timeout_millis = HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS;
+
+ char textual_ip_address[INET_ADDRSTRLEN];
+ auto inet = inet_ntop(AF_INET, &(udp.m_device_address.sin_addr), textual_ip_address, INET_ADDRSTRLEN);
+ if (NULL != inet) {
+ LOGGER__DEBUG("Found Hailo device: {}", textual_ip_address);
+ }
+}
+
+static Expected<hailo_eth_device_info_t> eth_device__handle_available_data(Udp &udp)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ /* Try to receive data from the udp socket and log timeouts in debug level */
+ status = udp.has_data(true);
+ if (HAILO_TIMEOUT == status) {
+ LOGGER__DEBUG("Scan timeout");
+ return make_unexpected(status);
+ }
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ hailo_eth_device_info_t device_info{};
+ eth_device__fill_eth_device_info(udp, &device_info);
+
+ return device_info;
+}
+
+static Expected<std::vector<hailo_eth_device_info_t>> eth_device__receive_responses(Udp &udp)
+{
+ std::vector<hailo_eth_device_info_t> results;
+ while (true) {
+ auto next_device_info = eth_device__handle_available_data(udp);
+ if (next_device_info.has_value()) {
+ results.emplace_back(next_device_info.release());
+ } else if (HAILO_TIMEOUT == next_device_info.status()) {
+ // We expect to stop receiving data due to timeout
+ break;
+ } else {
+ // Any other reason indicates a problem
+ return make_unexpected(next_device_info.status());
+ }
+ }
+
+ return results;
+}
+
+Expected<std::vector<hailo_eth_device_info_t>> EthernetDevice::scan(const std::string &interface_name,
+ std::chrono::milliseconds timeout)
+{
+ // Convert interface name to IP address
+ std::array<char, IPV4_STRING_MAX_LENGTH> interface_ip_address{};
+ auto status = EthernetUtils::get_ip_from_interface(interface_name.c_str(), interface_ip_address.data(), interface_ip_address.size());
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return scan_by_host_address(interface_ip_address.data(), timeout);
+}
+
+hailo_status get_udp_broadcast_params(const char *host_address, struct in_addr &interface_ip_address,
+ struct in_addr &broadcast_ip_address)
+{
+ assert(nullptr != host_address);
+
+ auto status = Socket::pton(AF_INET, host_address, &interface_ip_address);
+ CHECK_SUCCESS(status, "Invalid host ip address {}", host_address);
+ status = Socket::pton(AF_INET, ETH_BROADCAST_IP, &broadcast_ip_address);
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+Expected<std::vector<hailo_eth_device_info_t>> EthernetDevice::scan_by_host_address(const std::string &host_address,
+ std::chrono::milliseconds timeout)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request{};
+ size_t request_size = 0;
+ uint32_t sequence = SCAN_SEQUENCE;
+ struct in_addr broadcast_ip_address{};
+ struct in_addr interface_ip_address{};
+
+ status = get_udp_broadcast_params(host_address.c_str(), interface_ip_address, broadcast_ip_address);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Create broadcast udp object */
+ auto udp_broadcast = Udp::create(broadcast_ip_address, HAILO_DEFAULT_ETH_CONTROL_PORT, interface_ip_address, 0);
+ CHECK_EXPECTED(udp_broadcast);
+ status = udp_broadcast->set_timeout(timeout);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Build identify request */
+ common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, sequence);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Send broadcast identify request */
+ status = udp_broadcast->send((uint8_t *)&request, &request_size, false, MAX_UDP_PAYLOAD_SIZE);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ /* Receive all responses */
+ return eth_device__receive_responses(*udp_broadcast);
+}
+
+Expected<hailo_eth_device_info_t> EthernetDevice::parse_eth_device_info(const std::string &ip_addr,
+ bool log_on_failure)
+{
+ hailo_eth_device_info_t device_info{};
+
+ device_info.host_address.sin_family = AF_INET;
+ device_info.host_address.sin_port = HAILO_ETH_PORT_ANY;
+
+ auto status = Socket::pton(AF_INET, HAILO_ETH_ADDRESS_ANY, &(device_info.host_address.sin_addr));
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ device_info.device_address.sin_family = AF_INET;
+ device_info.device_address.sin_port = HAILO_DEFAULT_ETH_CONTROL_PORT;
+ status = Socket::pton(AF_INET, ip_addr.c_str(), &(device_info.device_address.sin_addr));
+ if (status != HAILO_SUCCESS) {
+ if (log_on_failure) {
+ LOGGER__ERROR("Invalid ip address {}", ip_addr);
+ }
+ return make_unexpected(status);
+ }
+
+ device_info.timeout_millis = HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS;
+ device_info.max_number_of_attempts = HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES;
+ device_info.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
+
+ return device_info;
+}
+
+void EthernetDevice::increment_control_sequence()
+{
+ m_control_sequence = (m_control_sequence + 1) % CONTROL__MAX_SEQUENCE;
+}
+
+hailo_reset_device_mode_t EthernetDevice::get_default_reset_mode()
+{
+ return HAILO_RESET_DEVICE_MODE_CHIP;
+}
+
+hailo_status EthernetDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+ CONTROL_PROTOCOL__request_t request = {};
+ size_t request_size = 0;
+ uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+ size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+ CONTROL_PROTOCOL__response_header_t *header = NULL;
+ CONTROL_PROTOCOL__payload_t *payload = NULL;
+ bool is_expecting_response = true;
+
+ switch (reset_type) {
+ case CONTROL_PROTOCOL__RESET_TYPE__CHIP:
+ is_expecting_response = false;
+ break;
+ case CONTROL_PROTOCOL__RESET_TYPE__SOFT:
+ /* Fallthrough */
+ case CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT:
+ is_expecting_response = false; // TODO: Check boot source, set is_expecting_response = (boot_source != pcie)
+ break;
+ default:
+ is_expecting_response = true;
+ break;
+ }
+
+ common_status = CONTROL_PROTOCOL__pack_reset_request(&request, &request_size, m_control_sequence, reset_type);
+ status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+ CHECK_SUCCESS(status);
+
+ /* On non-response controls we set the response_size to 0 */
+ if (!is_expecting_response) {
+ response_size = 0;
+ }
+
+ LOGGER__DEBUG("Sending reset request");
+ status = this->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+ // fw_interact should return success even if response is not expected
+ CHECK_SUCCESS(status);
+
+ /* Parse response if expected */
+ // TODO: fix logic with respect to is_expecting_response
+ if (0 != response_size) {
+ status = Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header,
+ &payload, &request);
+ CHECK_SUCCESS(status);
+ CHECK(is_expecting_response, HAILO_INTERNAL_FAILURE,
+ "Recived valid response from FW for control who is not expecting one.");
+ } else {
+ status = this->wait_for_wakeup();
+ CHECK_SUCCESS(status);
+ }
+
+ LOGGER__DEBUG("Board has been reset successfully");
+ return HAILO_SUCCESS;
+}
+
+Expected<hailo_device_architecture_t> EthernetDevice::get_architecture() const
+{
+ // FW is always up if we got here (EthernetDevice's ctor would fail otherwise)
+ // Hence, just return it
+ return Expected<hailo_device_architecture_t>(m_device_architecture);
+}
+
+hailo_eth_device_info_t EthernetDevice::get_device_info() const
+{
+ return m_device_info;
+}
+
+const char *EthernetDevice::get_dev_id() const
+{
+ return m_device_id.c_str();
+}
+
+Expected<D2H_EVENT_MESSAGE_t> EthernetDevice::read_notification()
+{
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+hailo_status EthernetDevice::disable_notifications()
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+Expected<ConfiguredNetworkGroupVector> EthernetDevice::add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params)
+{
+ // Reset FW state_machine status - can be removed?
+ static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
+ auto status = Control::reset_context_switch_state_machine(*this, REMOVE_NN_CONFIG_DURING_RESET);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ auto added_network_groups = create_networks_group_vector(hef, configure_params);
+ CHECK_EXPECTED(added_network_groups);
+
+ return added_network_groups;
+}
+
+/**
+ * Builds one ConfiguredNetworkGroup per HEF network group selected by @a configure_params.
+ * Side effects: appends the created core-ops / network groups to m_core_ops / m_network_groups.
+ * Fails if @a configure_params names a network group that does not exist in the HEF.
+ */
+Expected<ConfiguredNetworkGroupVector> EthernetDevice::create_networks_group_vector(Hef &hef, const NetworkGroupsParamsMap &configure_params)
+{
+    auto partial_clusters_layout_bitmap_exp = Control::get_partial_clusters_layout_bitmap(*this);
+    CHECK_EXPECTED(partial_clusters_layout_bitmap_exp);
+    auto partial_clusters_layout_bitmap = partial_clusters_layout_bitmap_exp.release();
+
+    auto &hef_network_groups = hef.pimpl->network_groups();
+    // Copy so matched entries can be erased; whatever remains afterwards is unmatched.
+    auto configure_params_copy = configure_params;
+    ConfiguredNetworkGroupVector added_network_groups;
+    // TODO: can be optimized (add another loop that allocates the network group we're adding)
+    added_network_groups.reserve(hef_network_groups.size());
+
+    for (const auto &hef_net_group : hef_network_groups) {
+        const std::string &network_group_name = hef_net_group->network_group_metadata().network_group_name();
+
+        /* If NG params are present, use them
+           If no configure params are given, use default */
+        ConfigureNetworkParams config_params{};
+        if (contains(configure_params, network_group_name)) {
+            config_params = configure_params_copy.at(network_group_name);
+            configure_params_copy.erase(network_group_name);
+        } else if (configure_params.empty()) {
+            auto config_params_exp = create_configure_params(hef, network_group_name);
+            CHECK_EXPECTED(config_params_exp);
+            config_params = config_params_exp.release();
+        } else {
+            // Specific params were given but not for this group - skip it.
+            continue;
+        }
+
+        auto net_group_config = create_core_op_metadata(hef, network_group_name, partial_clusters_layout_bitmap);
+        CHECK_EXPECTED(net_group_config);
+
+        // TODO: move to func, support multiple core ops
+        std::vector<std::shared_ptr<CoreOp>> core_ops_ptrs;
+
+        auto core_op_metadata = hef.pimpl->get_core_op_metadata(network_group_name);
+        CHECK_EXPECTED(core_op_metadata);
+
+        auto core_op_metadata_ptr = make_shared_nothrow<CoreOpMetadata>(core_op_metadata.release());
+        CHECK_AS_EXPECTED(nullptr != core_op_metadata_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+        auto net_flow_ops = hef.pimpl->post_process_ops(core_op_metadata_ptr->core_op_name());
+
+        // The core-op ctor reports construction failures through the out-param `status`.
+        auto status = HAILO_UNINITIALIZED;
+        auto single_context_app = HcpConfigCoreOp(*this, m_active_core_op_holder, net_group_config.release(),
+            config_params, core_op_metadata_ptr, status);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        auto core_op_ptr = make_shared_nothrow<HcpConfigCoreOp>(std::move(single_context_app));
+        CHECK_AS_EXPECTED(nullptr != core_op_ptr, HAILO_OUT_OF_HOST_MEMORY);
+        // TODO: move this func into HcpConfigCoreOp c'tor
+        status = core_op_ptr->create_streams_from_config_params(*this);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        // Check that all boundary streams were created
+        status = hef.pimpl->validate_boundary_streams_were_created(network_group_name, core_op_ptr);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        m_core_ops.push_back(core_op_ptr);
+        core_ops_ptrs.push_back(core_op_ptr);
+
+        auto net_group_expected = ConfiguredNetworkGroupBase::create(config_params, std::move(core_ops_ptrs), std::move(net_flow_ops));
+        CHECK_EXPECTED(net_group_expected);
+        auto net_group_ptr = net_group_expected.release();
+
+        added_network_groups.emplace_back(net_group_ptr);
+        m_network_groups.push_back(net_group_ptr);
+    }
+
+    // Anything left in the copy was requested by the caller but absent from the HEF.
+    std::string unmatched_keys = "";
+    for (const auto &pair : configure_params_copy) {
+        unmatched_keys.append(" ");
+        unmatched_keys.append(pair.first);
+    }
+    CHECK_AS_EXPECTED(unmatched_keys.size() == 0, HAILO_INVALID_ARGUMENT,
+        "Some network group names in the configuration are not found in the hef file:{}", unmatched_keys);
+
+    return added_network_groups;
+}
+
+/**
+ * Builds the single-context configuration (list of memory writes) for @a core_op_name,
+ * selecting the core-op variant that matches the actual device architecture.
+ *
+ * @note Ethernet devices only support single-context core-ops without DDR layers;
+ *       anything else is rejected here.
+ */
+Expected<std::vector<WriteMemoryInfo>> EthernetDevice::create_core_op_metadata(Hef &hef, const std::string &core_op_name, uint32_t partial_clusters_layout_bitmap)
+{
+    auto device_arch_exp = get_architecture();
+    CHECK_EXPECTED(device_arch_exp);
+    auto device_arch = device_arch_exp.release();
+    auto hef_arch = hef.pimpl->get_device_arch();
+
+    auto &hef_core_ops = hef.pimpl->core_ops(core_op_name);
+    assert(1 == hef_core_ops.size());
+    const auto &core_op = hef_core_ops[0];
+
+    auto expected_partial_core_op = Hef::Impl::get_core_op_per_arch(core_op, hef_arch, device_arch,
+        partial_clusters_layout_bitmap);
+    CHECK_EXPECTED(expected_partial_core_op);
+    auto partial_core_op = expected_partial_core_op.release();
+
+    // TODO: decide about core_op names - align with the Compiler
+
+    /* Validate that all core_ops are single context */
+    CHECK_AS_EXPECTED(1 == partial_core_op->contexts.size(), HAILO_INTERNAL_FAILURE,
+        "Only single-context core-ops are supported! Core-op {} has {} contexts.",
+        core_op_name, partial_core_op->contexts.size());
+    CHECK_AS_EXPECTED(!(Hef::Impl::contains_ddr_layers(*partial_core_op)), HAILO_INVALID_OPERATION,
+        "DDR layers are only supported for PCIe device. Core-op {} contains DDR layers.",
+        core_op_name);
+    auto status = Hef::Impl::validate_core_op_unique_layer_names(*partial_core_op);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    /* Update preliminary_config and dynamic_contexts recipes */
+    auto &proto_preliminary_config = partial_core_op->preliminary_config;
+    auto core_op_config = Hef::Impl::create_single_context_core_op_config(proto_preliminary_config);
+    CHECK_EXPECTED(core_op_config);
+
+    return core_op_config;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file eth_device.hpp
+ * @brief Declaration of EthernetDevice - a DeviceBase implementation that controls
+ *        a Hailo device over UDP/Ethernet.
+ **/
+
+#ifndef HAILO_ETH_DEVICE_H_
+#define HAILO_ETH_DEVICE_H_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "device_common/device_internal.hpp"
+#include "eth/udp.hpp"
+#include "eth/hcp_config_core_op.hpp"
+
+
+namespace hailort
+{
+
+// Device implementation that communicates with a Hailo device over UDP/Ethernet.
+class EthernetDevice : public DeviceBase {
+public:
+    // DeviceBase overrides - firmware request/response transport over the control UDP socket.
+    virtual hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size,
+        uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id) override;
+    virtual Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id) override;
+    virtual hailo_status wait_for_wakeup() override;
+    virtual void increment_control_sequence() override;
+    virtual hailo_reset_device_mode_t get_default_reset_mode() override;
+    virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
+
+    // Ethernet devices stream over ETH/MIPI only; PCIe/integrated interfaces are rejected.
+    virtual bool is_stream_interface_supported(const hailo_stream_interface_t &stream_interface) const override
+    {
+        switch (stream_interface) {
+        case HAILO_STREAM_INTERFACE_PCIE:
+        case HAILO_STREAM_INTERFACE_INTEGRATED:
+            return false;
+        case HAILO_STREAM_INTERFACE_ETH:
+        case HAILO_STREAM_INTERFACE_MIPI:
+            return true;
+        default:
+            LOGGER__ERROR("Invalid stream interface");
+            return false;
+        }
+    }
+
+    // Device discovery: broadcast scan on a local interface, or scan by host IP address.
+    static Expected<std::vector<hailo_eth_device_info_t>> scan(const std::string &interface_name,
+        std::chrono::milliseconds timeout);
+    static Expected<std::vector<hailo_eth_device_info_t>> scan_by_host_address(const std::string &host_address,
+        std::chrono::milliseconds timeout);
+    static Expected<hailo_eth_device_info_t> parse_eth_device_info(const std::string &ip_addr, bool log_on_failure);
+
+    static Expected<std::unique_ptr<EthernetDevice>> create(const hailo_eth_device_info_t &device_info);
+    static Expected<std::unique_ptr<EthernetDevice>> create(const std::string &ip_addr);
+    virtual Expected<hailo_device_architecture_t> get_architecture() const override;
+    hailo_eth_device_info_t get_device_info() const;
+    virtual const char* get_dev_id() const override;
+
+protected:
+    virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() override;
+    virtual hailo_status disable_notifications() override;
+    virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) override;
+
+private:
+    EthernetDevice(const hailo_eth_device_info_t &device_info, Udp &&control_udp, hailo_status &status);
+    Expected<ConfiguredNetworkGroupVector> create_networks_group_vector(Hef &hef, const NetworkGroupsParamsMap &configure_params);
+    Expected<std::vector<WriteMemoryInfo>> create_core_op_metadata(Hef &hef, const std::string &core_op_name, uint32_t partial_clusters_layout_bitmap);
+
+    const hailo_eth_device_info_t m_device_info;       // Addressing info used to open the UDP sockets.
+    std::string m_device_id;                           // Textual device id returned by get_dev_id().
+    Udp m_control_udp;                                 // Control-plane socket (fw_interact_impl).
+    std::vector<std::shared_ptr<CoreOp>> m_core_ops;   // Core-ops created by add_hef().
+    std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups; // TODO: HRT-9547 - Remove when ConfiguredNetworkGroup will be kept in global context
+    ActiveCoreOpHolder m_active_core_op_holder;        // Tracks which core-op is currently activated.
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_ETH_DEVICE_H_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file eth_stream.cpp
+ * @brief Input/output stream implementations that transfer inference data to/from
+ *        a Hailo device over UDP (Ethernet), with optional sync-packet framing.
+ **/
+
+
+
+#include "hailo/hailort.h"
+#include "hailo/stream.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/hailort_common.hpp"
+
+#include "common/ethernet_utils.hpp"
+#include "common/utils.hpp"
+
+#include "eth/eth_stream.hpp"
+#include "eth/eth_device.hpp"
+#include "eth/token_bucket.hpp"
+#include "device_common/control.hpp"
+
+#include <new>
+#include <stdlib.h>
+#include <math.h>
+#include <byte_order.h>
+
+
+namespace hailort
+{
+
+// Magic value that identifies a sync packet sent by the device on output streams.
+#define SYNC_PACKET_BARKER (0xa143341a)
+
+
+// Wire format of a device->host sync packet (fields arrive in network byte order).
+typedef struct hailo_output_sync_packet_t {
+    uint32_t barker;          // Must equal SYNC_PACKET_BARKER.
+    uint32_t sequence_index;  // Monotonic frame counter; gaps mean dropped frames.
+} hailo_output_sync_packet_t;
+
+// Best-effort close of the firmware-side stream; failures are logged, not propagated.
+EthernetInputStream::~EthernetInputStream()
+{
+    if (m_is_stream_activated) {
+        auto status = this->deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
+        }
+    }
+}
+
+
+/**
+ * Opens the data-plane UDP socket for a stream.
+ * When the caller passes HAILO_DEFAULT_ETH_DEVICE_PORT, the device port is derived
+ * from the stream index relative to the input/output base port.
+ */
+Expected<Udp> eth_stream__create_udp(EthernetDevice *eth_device, struct sockaddr_in host_address, uint8_t stream_index,
+    port_t device_port, bool is_input)
+{
+    if (HAILO_DEFAULT_ETH_DEVICE_PORT == device_port) {
+        if (is_input) {
+            device_port = (uint16_t)(stream_index + HailoRTCommon::ETH_INPUT_BASE_PORT);
+        } else {
+            device_port = (uint16_t)(stream_index + HailoRTCommon::ETH_OUTPUT_BASE_PORT);
+        }
+    }
+
+    return Udp::create(eth_device->get_device_info().device_address.sin_addr, device_port, host_address.sin_addr,
+        host_address.sin_port);
+}
+
+/** Input stream **/
+/** Input stream **/
+// Asks the firmware to close the H2D stream and clears the local activation flag.
+hailo_status EthernetInputStream::deactivate_stream()
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    ASSERT(m_is_stream_activated);
+
+    // TODO: Hold a ref not a pointer
+    status = Control::close_stream(m_device, m_dataflow_manager_id, true);
+    CHECK_SUCCESS(status);
+
+    m_is_stream_activated = false;
+
+    return HAILO_SUCCESS;
+}
+
+// Note: Ethernet streams don't work with dynamic batch sizes
+hailo_status EthernetInputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ CONTROL_PROTOCOL__config_stream_params_t params = {};
+
+ params.nn_stream_config = m_nn_stream_config;
+ params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_UDP;
+ params.is_input = true;
+ params.stream_index = m_stream_info.index;
+ params.communication_params.udp_input.listening_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_device_address.sin_port));
+ params.skip_nn_stream_config = false;
+ // Currently hardcoded assign as there are no power mode optimizations over eth
+ params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
+
+ if (this->configuration.is_sync_enabled) {
+ params.communication_params.udp_input.sync.should_sync = true;
+ params.communication_params.udp_input.sync.frames_per_sync = this->configuration.frames_per_sync;
+ params.communication_params.udp_input.sync.packets_per_frame = this->configuration.packets_per_frame;
+ params.communication_params.udp_input.sync.sync_size = this->configuration.sync_size;
+ }
+
+ params.communication_params.udp_input.buffers_threshold = this->configuration.buffers_threshold;
+ params.communication_params.udp_input.use_rtp = false;
+
+ status = Control::config_stream_udp_input(m_device, ¶ms, m_dataflow_manager_id);
+ CHECK_SUCCESS(status);
+
+ status = Control::open_stream(m_device, m_dataflow_manager_id, true);
+ CHECK_SUCCESS(status);
+
+ m_is_stream_activated = true;
+
+ return HAILO_SUCCESS;
+}
+
+/**
+ * Sends one raw buffer to the device over the data UDP socket.
+ * Fails with HAILO_NETWORK_GROUP_NOT_ACTIVATED if the owning network group is not
+ * active (zero-timeout wait on the activation event is used as a non-blocking probe).
+ * @return Number of bytes actually sent.
+ */
+Expected<size_t> EthernetInputStream::sync_write_raw_buffer(const MemoryView &buffer)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    status = get_core_op_activated_event()->wait(std::chrono::milliseconds(0));
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED, "Trying to write on stream before its network_group is activated");
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    size_t size = buffer.size();
+    status = m_udp.send((uint8_t*)buffer.data(), &size, this->configuration.use_dataflow_padding, this->configuration.max_payload_size);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Udp send was aborted!");
+        return make_unexpected(status);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(status, "{} (H2D) failed with status={}", name(), status);
+
+    return size;
+}
+
+/**
+ * Validates a raw write request and dispatches to the sync / no-sync transmit path.
+ * The buffer must be at least MIN_UDP_PAYLOAD_SIZE and aligned to HW_DATA_ALIGNMENT.
+ */
+hailo_status EthernetInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    ASSERT(NULL != buffer);
+
+    CHECK(size >= MIN_UDP_PAYLOAD_SIZE, HAILO_INVALID_ARGUMENT, "Input must be larger than {}", MIN_UDP_PAYLOAD_SIZE);
+    CHECK(((size % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
+        "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, size);
+
+    if (this->configuration.is_sync_enabled) {
+        status = eth_stream__write_all_with_sync(buffer, offset, size);
+    } else {
+        status = eth_stream__write_all_no_sync(buffer, offset, size);
+    }
+    // Abort is an expected user action - propagate quietly without the error macro.
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("eth_stream__write_all was aborted!");
+        return status;
+    }
+
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Transmit path without sync packets: split the buffer into max-payload packets and
+// make sure the trailing remainder is never smaller than the minimum UDP payload.
+hailo_status EthernetInputStream::eth_stream__write_all_no_sync(void *buffer, size_t offset, size_t size) {
+    size_t remainder_size = 0;
+    size_t packet_size = this->configuration.max_payload_size;
+
+    //if we have padding, consider it when calculating the packet sizes
+    if (this->configuration.use_dataflow_padding) {
+        packet_size -= PADDING_BYTES_SIZE + PADDING_ALIGN_BYTES;
+    }
+
+    remainder_size = size % packet_size;
+
+    // A too-small tail packet is grown to the minimum; the extra bytes are taken from
+    // the preceding packet by eth_stream__write_with_remainder's offset arithmetic.
+    if ((0 < remainder_size) && (remainder_size < MIN_UDP_PAYLOAD_SIZE)) {
+        remainder_size = MIN_UDP_PAYLOAD_SIZE;
+    }
+    return eth_stream__write_with_remainder(buffer, offset, size, remainder_size);
+}
+
+/**
+ * Sends [offset, offset+size) in two phases: the bulk (everything except the last
+ * remainder_size bytes) in as many sends as the UDP layer needs, then the remainder
+ * as a single send. HAILO_STREAM_ABORTED_BY_USER is propagated without error logging.
+ */
+hailo_status EthernetInputStream::eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size) {
+    size_t transfer_size = 0;
+    size_t offset_end_without_remainder = offset + size - remainder_size;
+
+    while (offset < offset_end_without_remainder) {
+        transfer_size = offset_end_without_remainder - offset;
+        auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, transfer_size));
+        if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
+            LOGGER__INFO("sync_write_raw_buffer was aborted!");
+            return expected_bytes_written.status();
+        }
+        CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
+        // sync_write_raw_buffer may send less than requested; advance by actual count.
+        offset += expected_bytes_written.release();
+    }
+    if (0 < remainder_size) {
+        auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, remainder_size));
+        if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
+            LOGGER__INFO("sync_write_raw_buffer was aborted!");
+            return expected_bytes_written.status();
+        }
+        CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
+        // The remainder must go out as exactly one packet.
+        assert(expected_bytes_written.value() == remainder_size);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Base class for rate-limited input streams; only stores the target rate.
+EthernetInputStreamRateLimited::EthernetInputStreamRateLimited(Device &device, Udp &&udp,
+    EventPtr &&core_op_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status) :
+    EthernetInputStream::EthernetInputStream(device, std::move(udp), std::move(core_op_activated_event), layer_info, status),
+    rate_bytes_per_sec(rate_bytes_per_sec)
+{}
+
+// Rate limiting via an in-process token bucket (used where traffic-control is unavailable).
+TokenBucketEthernetInputStream::TokenBucketEthernetInputStream(Device &device, Udp &&udp,
+    EventPtr &&core_op_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status) :
+    EthernetInputStreamRateLimited::EthernetInputStreamRateLimited(device, std::move(udp),
+        std::move(core_op_activated_event), rate_bytes_per_sec, layer_info, status),
+    token_bucket()
+{}
+
+/**
+ * Same two-phase send as the base implementation, but each send is preceded by a
+ * blocking token-bucket consume so the aggregate rate stays at rate_bytes_per_sec.
+ */
+hailo_status TokenBucketEthernetInputStream::eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size) {
+    size_t transfer_size = 0;
+    size_t offset_end_without_remainder = offset + size - remainder_size;
+
+    assert(remainder_size <= MAX_CONSUME_SIZE);
+    static_assert(MAX_CONSUME_SIZE <= BURST_SIZE, "We are asking to consume more bytes than the size of the token bucket, this will fail");
+
+    while (offset < offset_end_without_remainder) {
+        // Blocks until MAX_CONSUME_SIZE tokens are available; return value intentionally ignored.
+        (void)token_bucket.consumeWithBorrowAndWait(MAX_CONSUME_SIZE, rate_bytes_per_sec, BURST_SIZE);
+
+        transfer_size = offset_end_without_remainder - offset;
+        auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, transfer_size));
+        if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
+            LOGGER__INFO("sync_write_raw_buffer was aborted!");
+            return expected_bytes_written.status();
+        }
+        CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
+        offset += expected_bytes_written.release();
+    }
+    if (0 < remainder_size) {
+        // We don't static_assert that "remainder_size <= BURST_SIZE", so the call could fail in theory.
+        // However, since remainder_size is modulo MAX_UDP_PAYLOAD_SIZE and BURST_SIZE == MAX_UDP_PAYLOAD_SIZE, it should be smaller.
+        (void)token_bucket.consumeWithBorrowAndWait(static_cast<double>(remainder_size), rate_bytes_per_sec, BURST_SIZE);
+
+        auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, remainder_size));
+        if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
+            LOGGER__INFO("sync_write_raw_buffer was aborted!");
+            return expected_bytes_written.status();
+        }
+        CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
+        assert(expected_bytes_written.value() == remainder_size);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+#if defined(__GNUC__)
+/**
+ * Factory for the traffic-control (Linux tc) rate-limited input stream.
+ * Resolves the board IP/port from the UDP socket and sets up a kernel-level
+ * rate limit before constructing the stream object.
+ */
+Expected<std::unique_ptr<TrafficControlEthernetInputStream>> TrafficControlEthernetInputStream::create(
+    Device &device, Udp &&udp, EventPtr &&core_op_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info)
+{
+    auto board_ip = get_interface_address(&udp.m_device_address.sin_addr);
+    CHECK_EXPECTED(board_ip, "get_interface_address failed with status {}", board_ip.status());
+
+    const auto board_port = BYTE_ORDER__ntohs(udp.m_device_address.sin_port);
+
+    auto tc = TrafficControl::create(board_ip.value(), board_port, rate_bytes_per_sec);
+    CHECK_EXPECTED(tc, "Creating traffic control at rate {} failed with error {}", rate_bytes_per_sec, tc.status());
+
+    auto status = HAILO_UNINITIALIZED;
+    // Note: we don't use make_unique because TrafficControlEthernetInputStream's ctor is private
+    auto tc_ptr = std::unique_ptr<TrafficControlEthernetInputStream>(new (std::nothrow)
+        TrafficControlEthernetInputStream(device, std::move(udp), std::move(core_op_activated_event), rate_bytes_per_sec,
+        tc.release(), layer_info, status));
+    CHECK_AS_EXPECTED(nullptr != tc_ptr, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return tc_ptr;
+}
+
+// Converts a binary IPv4 address to its dotted-decimal string form.
+// NOTE(review): the buffer is allocated with IPV4_STRING_MAX_LENGTH but ntop is told
+// EthernetUtils::MAX_INTERFACE_SIZE - looks like a size mismatch; confirm the two constants agree.
+Expected<std::string> TrafficControlEthernetInputStream::get_interface_address(const struct in_addr *addr)
+{
+    auto ip = Buffer::create(IPV4_STRING_MAX_LENGTH, 0);
+    CHECK_EXPECTED(ip);
+
+    const auto result = Socket::ntop(AF_INET, addr, ip->as_pointer<char>(), EthernetUtils::MAX_INTERFACE_SIZE);
+    CHECK_SUCCESS_AS_EXPECTED(result, "Failed parsing IP to string with status {}", result);
+
+    return ip->to_string();
+}
+
+// Takes ownership of the configured TrafficControl object; teardown happens in its dtor.
+TrafficControlEthernetInputStream::TrafficControlEthernetInputStream(Device &device, Udp &&udp,
+    EventPtr &&core_op_activated_event, uint32_t rate_bytes_per_sec, TrafficControl &&tc, const LayerInfo &layer_info, hailo_status &status) :
+    EthernetInputStreamRateLimited(device, std::move(udp), std::move(core_op_activated_event), rate_bytes_per_sec, layer_info, status),
+    m_tc(std::move(tc))
+{}
+#endif
+
+/**
+ * Transmit path with sync packets: the buffer must hold a whole number of HW frames;
+ * each frame is sent with its trailing `sync_size` bytes as the remainder/sync packet.
+ */
+hailo_status EthernetInputStream::eth_stream__write_all_with_sync(void *buffer, size_t offset, size_t size) {
+    hailo_status status = HAILO_UNINITIALIZED;
+    size_t number_of_frames = 0;
+    size_t frame_size = m_stream_info.hw_frame_size;
+
+    if (0 != (size % frame_size)) {
+        // This is the write path - the message used to say "read" (copy-paste from the output path).
+        LOGGER__ERROR("Write size is not a multiple of frame size."
+                      "This operation is not possible with the sync packet mode."
+                      "Tried to write {} bytes and frame size is {}", size, m_stream_info.hw_frame_size);
+        return HAILO_INVALID_ARGUMENT;
+    }
+
+    number_of_frames = size / frame_size;
+    for (size_t i = 0; i < number_of_frames; i++) {
+        // Write frame by frame, whereas the remainder packet is the sync packet
+        status = eth_stream__write_with_remainder(buffer, offset, frame_size, this->configuration.sync_size);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("eth_stream__write_with_remainder was aborted!");
+            return status;
+        }
+        CHECK_SUCCESS(status);
+        offset += frame_size;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Derives the sync-mode packetization parameters (packets per frame, sync packet size)
+ * from the HW frame size. Sync is disabled entirely when a frame fits in one payload.
+ */
+hailo_status EthernetInputStream::eth_stream__config_input_sync_params(uint32_t frames_per_sync)
+{
+    size_t packet_size = MAX_UDP_PAYLOAD_SIZE;
+
+    if (MAX_UDP_PAYLOAD_SIZE >= m_stream_info.hw_frame_size) {
+        LOGGER__WARNING("Input size that isn't larger than {} doesn't benefit from sync, disabling..", MAX_UDP_PAYLOAD_SIZE);
+        this->configuration.is_sync_enabled = false;
+        return HAILO_SUCCESS;
+    }
+    this->configuration.is_sync_enabled = true;
+    CHECK(1 == frames_per_sync, HAILO_NOT_IMPLEMENTED,
+        "Currently not supported frames_per_sync != 1");
+    this->configuration.frames_per_sync = frames_per_sync;
+    //if we have padding, consider it when determining the number of packets
+    if (this->configuration.use_dataflow_padding) {
+        packet_size = MAX_UDP_PADDED_PAYLOAD_SIZE;
+    }
+    // Data packets per frame are all of the packets except the sync
+    this->configuration.packets_per_frame = (uint32_t) ceil((double) m_stream_info.hw_frame_size / (double) packet_size) - 1;
+    if (0 == (m_stream_info.hw_frame_size % packet_size)) {
+        // If there is no remainder to make the sync packet, we will "cut" it from the last data packet, thus increasing the number of packets.
+        this->configuration.packets_per_frame++;
+    }
+    // Make the remainder packet the sync packet
+    this->configuration.sync_size = (uint16_t)(m_stream_info.hw_frame_size % packet_size);
+
+    if (MIN_UDP_PAYLOAD_SIZE > this->configuration.sync_size) {
+        // If the remainder isn't big enough, we'll "cut" from the last data packet enough to fill the minimum size.
+        this->configuration.sync_size = MIN_UDP_PAYLOAD_SIZE;
+    }
+    LOGGER__DEBUG("Configured sync size {}, packets per frame {}", this->configuration.sync_size, this->configuration.packets_per_frame);
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Factory for input streams. Picks the concrete class by rate-limit settings:
+ * plain stream when no limit, TokenBucket on Windows, Linux traffic-control otherwise.
+ * Also derives dataflow padding from the device architecture and sync params from @a params.
+ */
+Expected<std::unique_ptr<EthernetInputStream>> EthernetInputStream::create(Device &device,
+    const LayerInfo &edge_layer, const hailo_eth_input_stream_params_t &params, EventPtr core_op_activated_event)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    // TODO: try to avoid cast
+    // NOTE(review): assumes `device` is actually an EthernetDevice - no runtime check; confirm callers guarantee it.
+    auto eth_device = reinterpret_cast<EthernetDevice*>(&device);
+    std::unique_ptr<EthernetInputStream> local_stream;
+
+    auto stream_index = edge_layer.stream_index;
+    auto udp = eth_stream__create_udp(eth_device, params.host_address, stream_index, params.device_port, true);
+    CHECK_EXPECTED(udp);
+
+    if (params.rate_limit_bytes_per_sec == 0) {
+        local_stream = std::unique_ptr<EthernetInputStream>(
+            new (std::nothrow) EthernetInputStream(device, udp.release(), std::move(core_op_activated_event), edge_layer, status));
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    } else {
+#ifdef _MSC_VER
+        // TODO: Add factory class
+        local_stream = std::unique_ptr<EthernetInputStream>(
+            new (std::nothrow) TokenBucketEthernetInputStream(device, udp.release(),
+            std::move(core_op_activated_event), params.rate_limit_bytes_per_sec, edge_layer, status));
+        CHECK_SUCCESS_AS_EXPECTED(status);
+#else
+        auto stream_expected = TrafficControlEthernetInputStream::create(device, udp.release(),
+            std::move(core_op_activated_event), params.rate_limit_bytes_per_sec, edge_layer);
+        CHECK_EXPECTED(stream_expected);
+        local_stream = stream_expected.release();
+#endif
+    }
+
+    CHECK_AS_EXPECTED((nullptr != local_stream), HAILO_OUT_OF_HOST_MEMORY);
+    local_stream->m_is_stream_activated = false;
+
+    // Hailo8/8L require dataflow padding; other architectures do not.
+    auto device_architecture = eth_device->get_architecture();
+    CHECK_EXPECTED(device_architecture);
+    if ((HAILO_ARCH_HAILO8 == device_architecture.value()) || (HAILO_ARCH_HAILO8L == device_architecture.value())) {
+        local_stream->configuration.use_dataflow_padding = true;
+    }
+    else {
+        local_stream->configuration.use_dataflow_padding = false;
+    }
+
+    local_stream->set_max_payload_size(params.max_payload_size);
+
+    local_stream->configuration.is_sync_enabled = params.is_sync_enabled;
+    if (local_stream->configuration.is_sync_enabled) {
+        status = local_stream->eth_stream__config_input_sync_params(params.frames_per_sync);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
+    local_stream->configuration.buffers_threshold = params.buffers_threshold;
+
+    return local_stream;
+}
+
+// Stores the requested payload size, clamped to the UDP payload limit.
+void EthernetInputStream::set_max_payload_size(uint16_t size)
+{
+    this->configuration.max_payload_size = (size > MAX_UDP_PAYLOAD_SIZE) ? MAX_UDP_PAYLOAD_SIZE : size;
+}
+
+// Delegates the stream timeout to the underlying UDP socket.
+hailo_status EthernetInputStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    return m_udp.set_timeout(timeout);
+}
+
+// Converts the socket's timeval timeout back to milliseconds.
+std::chrono::milliseconds EthernetInputStream::get_timeout() const
+{
+    return std::chrono::milliseconds((MILLISECONDS_IN_SECOND * m_udp.m_timeout.tv_sec) + (m_udp.m_timeout.tv_usec / MICROSECONDS_IN_MILLISECOND));
+}
+
+// Device-side UDP port in host byte order.
+// NOTE(review): uses bare ntohs while the rest of the file uses BYTE_ORDER__ntohs - consider aligning.
+uint16_t EthernetInputStream::get_remote_port()
+{
+    return ntohs(m_udp.m_device_address.sin_port);
+}
+
+/** Output stream **/
+/** Output stream **/
+// Best-effort close of the firmware-side stream; failures are logged, not propagated.
+EthernetOutputStream::~EthernetOutputStream()
+{
+    if (m_is_stream_activated) {
+        auto status = this->deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
+        }
+    }
+}
+
+// Asks the firmware to close the D2H stream and clears the local activation flag.
+hailo_status EthernetOutputStream::deactivate_stream()
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    ASSERT(m_is_stream_activated);
+
+    status = Control::close_stream(m_device, m_dataflow_manager_id, false);
+    CHECK_SUCCESS(status);
+
+    m_is_stream_activated = false;
+
+    return HAILO_SUCCESS;
+}
+
+// Note: Ethernet streams don't work with dynamic batch sizes
+hailo_status EthernetOutputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ CONTROL_PROTOCOL__config_stream_params_t params = {};
+
+ params.nn_stream_config = m_nn_stream_config;
+ params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_UDP;
+ params.is_input = false;
+ params.stream_index = m_stream_info.index;
+ params.skip_nn_stream_config = false;
+ // Currently hardcoded assign as there are no power mode optimizations over eth
+ params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
+
+ params.communication_params.udp_output.chip_udp_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_device_address.sin_port));
+ params.communication_params.udp_output.host_udp_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_host_address.sin_port));
+ params.communication_params.udp_output.max_udp_payload_size = this->configuration.max_payload_size;
+ params.communication_params.udp_output.buffers_threshold = this->configuration.buffers_threshold;
+ params.communication_params.udp_output.use_rtp = false;
+
+ if (this->configuration.is_sync_enabled) {
+ params.communication_params.udp_output.should_send_sync_packets = true;
+ }
+
+ status = Control::config_stream_udp_output(m_device, ¶ms, m_dataflow_manager_id);
+ CHECK_SUCCESS(status);
+
+ status = Control::open_stream(m_device, m_dataflow_manager_id, false);
+ CHECK_SUCCESS(status);
+
+ m_is_stream_activated = true;
+
+ return HAILO_SUCCESS;
+}
+
+// Receive path without sync packets: keep reading until [offset, offset+size) is filled.
+hailo_status EthernetOutputStream::read_all_no_sync(void *buffer, size_t offset, size_t size) {
+    size_t offset_end = 0;
+    size_t transfer_size = 0;
+
+    offset_end = offset + size;
+    while (offset < offset_end) {
+        transfer_size = offset_end - offset;
+        MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
+        auto expected_bytes_read = this->sync_read_raw_buffer(buffer_view);
+        if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_read.status()) {
+            LOGGER__INFO("sync_read_raw_buffer was aborted!");
+            return expected_bytes_read.status();
+        }
+        CHECK_EXPECTED_AS_STATUS(expected_bytes_read);
+        offset += expected_bytes_read.release();
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Receive path with sync packets. Reads whole frames, using the device's sync packets
+ * (barker + sequence index) to detect frame boundaries, dropped frames and short frames.
+ * Short frames are zero-padded and the batch is flagged invalid (HAILO_INVALID_FRAME).
+ * Leftover data from a previous call (a data packet that arrived where a sync was
+ * expected) is prepended before reading from the socket.
+ */
+hailo_status EthernetOutputStream::read_all_with_sync(void *buffer, size_t offset, size_t size) {
+    hailo_status status = HAILO_UNINITIALIZED;
+    size_t initial_offset = offset;
+    size_t offset_end = offset + size;
+    bool got_last_sync_early = false;
+    const size_t frame_size = m_stream_info.hw_frame_size;
+    bool is_batch_invalid = false;
+
+    if ((size % frame_size) != 0) {
+        LOGGER__ERROR("Read size is not a multiple of frame size."
+            "This operation is not possible with the sync packet mode."
+            "Tried to read {} bytes and frame size is {}", size, frame_size);
+        return HAILO_INVALID_ARGUMENT;
+    }
+
+    if (this->leftover_size > 0) {
+        // NOTE(review): assumes leftover_size <= size - confirm leftover_buffer capacity vs. caller buffer.
+        memcpy((uint8_t*)buffer + offset, this->leftover_buffer, this->leftover_size);
+        offset += this->leftover_size;
+        // leftover size will be reassigned in the end, but in case the function ends prematurely we will zero it for safety.
+        this->leftover_size = 0;
+    }
+
+    while (offset < offset_end) {
+        size_t transfer_size = offset_end - offset;
+        MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
+        auto expected_bytes_read = this->sync_read_raw_buffer(buffer_view);
+        status = expected_bytes_read.status();
+        if (HAILO_TIMEOUT == status) {
+            return handle_timeout(buffer, offset, initial_offset, frame_size);
+        } else if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("sync_read_raw_buffer was aborted");
+            return status;
+        } else if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("read failed");
+            return status;
+        }
+        transfer_size = expected_bytes_read.release();
+        if (is_sync_packet(buffer, offset, transfer_size)) {
+            // Sync packets are consumed in place (offset is not advanced past them).
+            uint32_t sequence_index = BYTE_ORDER__ntohl(((hailo_output_sync_packet_t*)((uint8_t*)buffer + offset))->sequence_index);
+            if (is_sync_expected(offset, initial_offset, frame_size)) {
+                if (sequence_index != (this->last_seen_sync_index + 1)) {
+                    // Batch is invalid if a frame was skipped
+                    is_batch_invalid = true;
+                    LOGGER__WARNING("Received {} frames. Missed sync packets between them, treating the batch as invalid data", sequence_index - this->last_seen_sync_index);
+                }
+                if (sequence_index == this->last_seen_sync_index) {
+                    LOGGER__ERROR("Got duplicate sync!");
+                    return HAILO_INTERNAL_FAILURE;
+                }
+            } else {
+                // Sync arrived mid-frame: the frame is short; pad the rest with zeros.
+                size_t number_of_missing_bytes = (frame_size - ((offset - initial_offset) % frame_size));
+                LOGGER__WARNING("Some bytes are missing at frame, padding {} bytes with zeros", number_of_missing_bytes);
+                memset((uint8_t*)buffer + offset, 0, number_of_missing_bytes);
+                offset += number_of_missing_bytes;
+                if (offset == offset_end) {
+                    // This early sync also served as the final sync - don't wait for another.
+                    got_last_sync_early = true;
+                }
+                is_batch_invalid = true;
+            }
+            this->last_seen_sync_index = sequence_index;
+        } else {
+            offset += transfer_size;
+        }
+    }
+
+    status = HAILO_SUCCESS;
+
+    if (!got_last_sync_early) {
+        status = get_last_sync();
+    }
+    if (HAILO_SUCCESS == status && is_batch_invalid) {
+        return HAILO_INVALID_FRAME;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Reads the sync packet expected at the end of a batch. If a data packet arrives
+// instead, it is stashed in leftover_buffer for the next read_all_with_sync() call.
+hailo_status EthernetOutputStream::get_last_sync() {
+    size_t last_packet_size = sizeof(this->leftover_buffer);
+    MemoryView leftover_buffer_view(this->leftover_buffer, last_packet_size);
+    auto expected_bytes_read = sync_read_raw_buffer(leftover_buffer_view);
+    CHECK(HAILO_TIMEOUT != expected_bytes_read.status(), HAILO_INVALID_FRAME, "Got timeout on last sync, marking last frame as invalid");
+    CHECK_EXPECTED_AS_STATUS(expected_bytes_read, "Recv error");
+    last_packet_size = expected_bytes_read.release();
+
+    if (is_sync_packet(this->leftover_buffer, 0, last_packet_size)) {
+        this->leftover_size = 0;
+    } else {
+        LOGGER__WARNING("Received a data packet instead of sync, saving leftover for later frame");
+        this->leftover_size = last_packet_size;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// If a timeout occurred after some data was already received, zero-pad the rest of the
+// current frame and report HAILO_INVALID_FRAME (treating the timeout like a sync packet).
+// A timeout with no data, or a second consecutive timeout, is a hard HAILO_TIMEOUT.
+hailo_status EthernetOutputStream::handle_timeout(const void* buffer, size_t offset,
+    size_t initial_offset, const size_t frame_size) {
+    if (this->encountered_timeout || (offset == initial_offset)) {
+        LOGGER__ERROR("{} (D2H) got timeout (timeout={}ms), unable to complete the frame", name(), get_timeout().count());
+        return HAILO_TIMEOUT;
+    }
+    LOGGER__ERROR("Received timeout. Continuing logic as if a sync packet was received");
+    size_t number_of_missing_bytes = (frame_size - ((offset - initial_offset) % frame_size));
+    LOGGER__ERROR("padding {} bytes with zeros because of timeout", number_of_missing_bytes);
+    // NOTE(review): writes through a const void* via cast - the parameter is effectively mutable; confirm intent.
+    memset((uint8_t*)buffer + offset, 0, number_of_missing_bytes);
+    this->encountered_timeout = true;
+    return HAILO_INVALID_FRAME;
+}
+
+// A sync packet is expected exactly on frame boundaries, i.e. only after at least one
+// byte was written past initial_offset and a whole number of frames was consumed.
+bool EthernetOutputStream::is_sync_expected(size_t offset, size_t initial_offset, const size_t frame_size) {
+    if (offset <= initial_offset) {
+        return false;
+    }
+    return (0 == ((offset - initial_offset) % frame_size));
+}
+
+// A packet is considered a sync packet iff it has exactly the sync-packet size and its
+// barker field matches SYNC_PACKET_BARKER after byte-order conversion.
+bool EthernetOutputStream::is_sync_packet(const void* buffer, size_t offset, size_t transfer_size) {
+    return (transfer_size == sizeof(hailo_output_sync_packet_t) &&
+        ((hailo_output_sync_packet_t*)((uint8_t*)buffer + offset))->barker == BYTE_ORDER__ntohl(SYNC_PACKET_BARKER));
+}
+
+// Reads one full transfer from the device into `buffer`, dispatching to the sync or
+// no-sync implementation according to the stream's configuration.
+// The buffer size must be a multiple of HW_DATA_ALIGNMENT.
+hailo_status EthernetOutputStream::read_all(MemoryView &buffer)
+{
+    if (0 != (buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT)) {
+        LOGGER__ERROR("Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
+        return HAILO_INVALID_ARGUMENT;
+    }
+
+    const hailo_status status = this->configuration.is_sync_enabled ?
+        this->read_all_with_sync(buffer.data(), 0, buffer.size()) :
+        this->read_all_no_sync(buffer.data(), 0, buffer.size());
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        // Abort is an expected flow (user-initiated) - propagate without treating it as an error.
+        LOGGER__INFO("read_all was aborted!");
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Receives a single UDP packet into `buffer` and returns the number of bytes read.
+// Fails with HAILO_NETWORK_GROUP_NOT_ACTIVATED if the owning network group is not active.
+Expected<size_t> EthernetOutputStream::sync_read_raw_buffer(MemoryView &buffer)
+{
+    // A zero-timeout wait on the activation event is used purely as an "is activated" check.
+    auto status = get_core_op_activated_event()->wait(std::chrono::milliseconds(0));
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED,
+        "Trying to read on stream before its network_group is activated");
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // recv takes buffer_size in/out: in as capacity, out as bytes actually received.
+    auto buffer_size = buffer.size();
+    status = m_udp.recv((uint8_t*)buffer.data(),&buffer_size);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Udp recv was aborted!");
+        return make_unexpected(status);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(status, "{} (D2H) failed with status={}", name(), status);
+
+    return buffer_size;
+}
+
+// Copies the user-supplied stream params onto the stream object.
+// Sync mode is force-disabled for NMS-ordered streams, which do not support it.
+hailo_status EthernetOutputStream::fill_output_stream_ptr_with_info(const hailo_eth_output_stream_params_t &params, EthernetOutputStream *stream)
+{
+    bool sync_enabled = params.is_sync_enabled;
+    const bool is_nms_order = (HAILO_FORMAT_ORDER_HAILO_NMS == stream->m_stream_info.format.order);
+    if (is_nms_order && sync_enabled) {
+        LOGGER__WARNING("NMS is not supported with sync enabled. Setting sync flag to false");
+        sync_enabled = false;
+    }
+    stream->configuration.is_sync_enabled = sync_enabled;
+
+    stream->configuration.max_payload_size = params.max_payload_size;
+    stream->configuration.buffers_threshold = params.buffers_threshold;
+
+    stream->m_is_stream_activated = false;
+    return HAILO_SUCCESS;
+}
+
+/**
+ * Creates an EthernetOutputStream bound to a UDP socket for the given edge layer.
+ *
+ * @param device                   Device the stream belongs to (must be an EthernetDevice).
+ * @param edge_layer               Layer info describing the stream's edge layer.
+ * @param params                   User-supplied Ethernet output stream parameters.
+ * @param core_op_activated_event  Event signalled when the owning core-op is activated.
+ * @return Owning pointer to the created stream, or a failure status.
+ */
+Expected<std::unique_ptr<EthernetOutputStream>> EthernetOutputStream::create(Device &device,
+    const LayerInfo &edge_layer, const hailo_eth_output_stream_params_t &params, EventPtr core_op_activated_event)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    std::unique_ptr<EthernetOutputStream> local_stream = nullptr;
+    // TODO: try to avoid cast
+    auto eth_device = reinterpret_cast<EthernetDevice*>(&device);
+
+    const auto stream_index = edge_layer.stream_index;
+    auto udp = eth_stream__create_udp(eth_device, params.host_address, stream_index, params.device_port, false);
+    CHECK_EXPECTED(udp);
+    // new (std::nothrow) returns nullptr on allocation failure instead of throwing.
+    local_stream = std::unique_ptr<EthernetOutputStream>(new (std::nothrow) EthernetOutputStream(device,
+        edge_layer,
+        udp.release(), std::move(core_op_activated_event), status));
+    CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = fill_output_stream_ptr_with_info(params, local_stream.get());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return local_stream;
+}
+
+// Sets the receive timeout on the underlying UDP socket.
+hailo_status EthernetOutputStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    return m_udp.set_timeout(timeout);
+}
+
+// Converts the UDP socket's timeval-based timeout (seconds + microseconds) to milliseconds.
+std::chrono::milliseconds EthernetOutputStream::get_timeout() const
+{
+    return std::chrono::milliseconds((MILLISECONDS_IN_SECOND * m_udp.m_timeout.tv_sec) + (m_udp.m_timeout.tv_usec / MICROSECONDS_IN_MILLISECOND));
+}
+
+// Aborts any blocking UDP operation on this output stream.
+hailo_status EthernetOutputStream::abort()
+{
+    return m_udp.abort();
+}
+
+// Aborts any blocking UDP operation on this input stream.
+hailo_status EthernetInputStream::abort()
+{
+    return m_udp.abort();
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file eth_stream.hpp
+ * @brief Ethernet (UDP) input/output stream declarations.
+ *
+ * Declares the Ethernet stream classes (input, rate-limited input variants, output)
+ * and the configuration structs used to stream data to/from a Hailo device over UDP.
+ **/
+
+#ifndef HAILO_ETH_STREAM_H_
+#define HAILO_ETH_STREAM_H_
+
+#include "hailo/hailort.h"
+#include "hailo/hef.hpp"
+#include "hailo/device.hpp"
+#include "hailo/event.hpp"
+
+#include "eth/token_bucket.hpp"
+#include "eth/udp.hpp"
+#include "stream_common/stream_internal.hpp"
+
+#if defined(__GNUC__)
+#include "common/os/posix/traffic_control.hpp"
+#endif
+
+
+namespace hailort
+{
+
+// TODO: move those structs to hailort.h when implemented
+// Runtime configuration of an Ethernet input stream - presumably filled from the
+// user-supplied hailo_eth_input_stream_params_t at stream creation (see eth_stream.cpp).
+typedef struct {
+    uint16_t max_payload_size;
+    bool use_dataflow_padding;
+    bool is_sync_enabled;
+    uint32_t frames_per_sync;
+    uint32_t packets_per_frame;
+    uint16_t sync_size;
+    uint32_t buffers_threshold;
+} hailo_stream_eth_input_configuration_t;
+
+// Runtime configuration of an Ethernet output stream, filled from the user-supplied
+// hailo_eth_output_stream_params_t (see EthernetOutputStream::fill_output_stream_ptr_with_info).
+typedef struct {
+    uint16_t max_payload_size;
+    bool is_sync_enabled;
+    uint32_t buffers_threshold;
+} hailo_stream_eth_output_configuration_t;
+
+/**
+ * Input (H2D) stream implementation over Ethernet (UDP).
+ * Writes frames to the device through m_udp, optionally interleaving sync packets
+ * (see configuration.is_sync_enabled).
+ */
+class EthernetInputStream : public InputStreamBase {
+private:
+    // Value-initialized so the struct holds no indeterminate values before creation code fills it.
+    hailo_stream_eth_input_configuration_t configuration{};
+    Udp m_udp;
+    // Fix: was left indeterminate by the constructor below (the output stream class
+    // initializes the equivalent member explicitly).
+    bool m_is_stream_activated = false;
+    Device &m_device;
+
+    hailo_status eth_stream__config_input_sync_params(uint32_t frames_per_sync);
+    hailo_status eth_stream__write_all_no_sync(void *buffer, size_t offset, size_t size);
+    hailo_status eth_stream__write_all_with_sync(void *buffer, size_t offset, size_t size);
+    hailo_status set_timeout(std::chrono::milliseconds timeout);
+    void set_max_payload_size(uint16_t size);
+
+protected:
+    virtual hailo_status eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size);
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
+    virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+
+public:
+    EthernetInputStream(Device &device, Udp &&udp, EventPtr &&core_op_activated_event, const LayerInfo &layer_info, hailo_status &status) :
+        InputStreamBase(layer_info, HAILO_STREAM_INTERFACE_ETH, std::move(core_op_activated_event), status), m_udp(std::move(udp)), m_device(device) {}
+    virtual ~EthernetInputStream();
+
+    static Expected<std::unique_ptr<EthernetInputStream>> create(Device &device,
+        const LayerInfo &edge_layer, const hailo_eth_input_stream_params_t &params, EventPtr core_op_activated_event);
+
+    uint16_t get_remote_port();
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override {return HAILO_SUCCESS;}; // TODO (HRT-3799): clear abort state in the eth stream
+};
+
+// Common base for rate-limited Ethernet input streams; stores the target rate (bytes/sec).
+class EthernetInputStreamRateLimited : public EthernetInputStream {
+protected:
+    const uint32_t rate_bytes_per_sec;
+
+public:
+    EthernetInputStreamRateLimited(Device &device, Udp &&udp, EventPtr &&core_op_activated_event,
+        uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status);
+    virtual ~EthernetInputStreamRateLimited() = default;
+};
+
+// Rate-limited input stream that throttles in user space with a token bucket
+// (consumed in its eth_stream__write_with_remainder override).
+class TokenBucketEthernetInputStream : public EthernetInputStreamRateLimited {
+private:
+    DynamicTokenBucket token_bucket;
+    // Note:
+    // * We set the token bucket's burst size to be our MTU. If we'd use larger burst sizes
+    //   we could send packets faster than the desired rate.
+    // * We send packets with at most MAX_UDP_PAYLOAD_SIZE bytes of data. Hence we won't
+    //   consume more than MAX_UDP_PAYLOAD_SIZE tokens from the token bucket.
+    static const uint32_t BURST_SIZE = MAX_UDP_PAYLOAD_SIZE;
+    static const uint32_t MAX_CONSUME_SIZE = MAX_UDP_PAYLOAD_SIZE;
+
+protected:
+    virtual hailo_status eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size);
+
+public:
+    TokenBucketEthernetInputStream(Device &device, Udp &&udp, EventPtr &&core_op_activated_event,
+        uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status);
+    virtual ~TokenBucketEthernetInputStream() = default;
+};
+
+
+#if defined(__GNUC__)
+// Rate-limited input stream whose throttling is delegated to the OS traffic-control
+// layer (see common/os/posix/traffic_control.hpp); available on POSIX builds only.
+class TrafficControlEthernetInputStream : public EthernetInputStreamRateLimited {
+public:
+    static Expected<std::unique_ptr<TrafficControlEthernetInputStream>> create(Device &device, Udp &&udp,
+        EventPtr &&core_op_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info);
+    virtual ~TrafficControlEthernetInputStream() = default;
+
+private:
+    TrafficControlEthernetInputStream(Device &device, Udp &&udp, EventPtr &&core_op_activated_event,
+        uint32_t rate_bytes_per_sec, TrafficControl &&tc, const LayerInfo &layer_info, hailo_status &status);
+    static Expected<std::string> get_interface_address(const struct in_addr *addr);
+
+    TrafficControl m_tc;
+};
+#endif
+
+/**
+ * Output (D2H) stream implementation over Ethernet (UDP).
+ * When sync mode is enabled the device terminates each frame with a sync packet;
+ * leftover_buffer stashes a data packet that arrived where a sync was expected.
+ */
+class EthernetOutputStream : public OutputStreamBase {
+private:
+    uint8_t leftover_buffer[MAX_UDP_PAYLOAD_SIZE];
+    size_t leftover_size = 0;
+    uint32_t last_seen_sync_index;
+    bool encountered_timeout;
+    hailo_stream_eth_output_configuration_t configuration;
+    Udp m_udp;
+    bool m_is_stream_activated;
+    Device &m_device;
+
+    EthernetOutputStream(Device &device, const LayerInfo &edge_layer, Udp &&udp, EventPtr &&core_op_activated_event, hailo_status &status) :
+        OutputStreamBase(edge_layer, std::move(core_op_activated_event), status),
+        leftover_buffer(),
+        leftover_size(0),
+        // Firmware starts sending sync sequence from 0, so treating the first previous as max value (that will be overflowed to 0)
+        last_seen_sync_index(std::numeric_limits<uint32_t>::max()),
+        encountered_timeout(false),
+        configuration(),
+        m_udp(std::move(udp)),
+        m_device(device)
+    {}
+
+    hailo_status read_all(MemoryView &buffer) override;
+    hailo_status read_all_with_sync(void *buffer, size_t offset, size_t size);
+    hailo_status read_all_no_sync(void *buffer, size_t offset, size_t size);
+
+    static bool is_sync_packet(const void* buffer, size_t offset, size_t transfer_size);
+    static bool is_sync_expected(size_t offset, size_t initial_offset, size_t frame_size);
+    hailo_status handle_timeout(const void* buffer, size_t offset, size_t initial_offset, size_t frame_size);
+    hailo_status set_timeout(std::chrono::milliseconds timeout);
+    hailo_status get_last_sync();
+
+    static hailo_status fill_output_stream_ptr_with_info(const hailo_eth_output_stream_params_t &params, EthernetOutputStream *stream);
+
+public:
+    virtual ~EthernetOutputStream();
+
+    virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer);
+
+    static Expected<std::unique_ptr<EthernetOutputStream>> create(Device &device, const LayerInfo &edge_layer,
+        const hailo_eth_output_stream_params_t &params, EventPtr core_op_activated_event);
+
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override {return HAILO_SUCCESS;}; // TODO (HRT-3799): clear abort state in the eth stream
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_ETH_STREAM_H_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hcp_config_activated_core_op.cpp
+ * @brief HcpConfigActivatedCoreOp implementation
+ **/
+
+#include "eth/hcp_config_activated_core_op.hpp"
+#include "device_common/control.hpp"
+
+
+namespace hailort
+{
+
+// Activates a core-op configured over HCP: closes previous dataflows, resets the NN
+// core, writes all configuration buffers to device memory, then constructs the
+// activated object (whose constructor performs the actual activation).
+Expected<HcpConfigActivatedCoreOp> HcpConfigActivatedCoreOp::create(Device &device, std::vector<WriteMemoryInfo> &config,
+    const std::string &core_op_name,
+    // hailo_activate_network_group_params_t is currently an empty holder, if anything will be added to it,
+    // it will require a check that these params will be relevant for this one core op only.
+    const hailo_activate_network_group_params_t &network_group_params,
+    std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+    std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+    ActiveCoreOpHolder &active_core_op_holder,
+    hailo_power_mode_t power_mode, EventPtr core_op_activated_event,
+    CoreOp &core_op)
+{
+    // Only one core-op may be active at a time.
+    CHECK(!active_core_op_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
+        "core-op is currently active. You must deactivate before activating another core-op");
+
+    // Close older dataflows
+    auto status = Control::close_all_streams(device);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Reset nn_core before writing configurations
+    status = device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Write each configuration blob to its target device address.
+    for (auto &m : config) {
+        status = device.write_memory(m.address, MemoryView(m.data));
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
+    // The constructor reports failures through the out-param `status`.
+    HcpConfigActivatedCoreOp object(device, active_core_op_holder, core_op_name, network_group_params, input_streams, output_streams,
+        power_mode, std::move(core_op_activated_event), core_op, status);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return object;
+}
+
+// Constructor: after the base ActivatedCoreOp initializes successfully, performs the
+// actual device-side activation of the core-op. Failures are reported via `status`.
+HcpConfigActivatedCoreOp::HcpConfigActivatedCoreOp(
+    Device &device,
+    ActiveCoreOpHolder &active_core_op_holder,
+    const std::string &core_op_name,
+    const hailo_activate_network_group_params_t &network_group_params,
+    std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+    std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+    hailo_power_mode_t power_mode,
+    EventPtr &&core_op_activated_event,
+    CoreOp &core_op, hailo_status &status) :
+      ActivatedCoreOp(network_group_params, input_streams, output_streams,
+                        std::move(core_op_activated_event), status),
+      m_active_core_op_holder(active_core_op_holder),
+      m_is_active(true),
+      m_power_mode(power_mode),
+      m_device(device),
+      m_core_op_name(core_op_name)
+{
+    // Validate ActivatedCoreOp status
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+    status = core_op.activate_impl(CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to activate core-op");
+        return;
+    }
+}
+
+// Destructor: deactivates the currently-active core-op, unless this instance was
+// moved-from (m_is_active is cleared by the move constructor).
+HcpConfigActivatedCoreOp::~HcpConfigActivatedCoreOp()
+{
+    if (!m_is_active) {
+        return;
+    }
+
+    auto expected_config_network_ref = m_active_core_op_holder.get();
+    if (!expected_config_network_ref.has_value()) {
+        LOGGER__ERROR("Error getting configured core-op");
+        return;
+    }
+    const auto &config_core_op = expected_config_network_ref.value();
+
+    const auto status = config_core_op.get().deactivate_impl();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to deactivate core-op");
+    }
+}
+
+// TODO: add get_core_op_name() for better code readability?
+// Returns the core-op's name, which doubles as the network-group name here.
+const std::string &HcpConfigActivatedCoreOp::get_network_group_name() const
+{
+    // The network_group name is the same as the core_op name in this case.
+    // HcpConfigActivatedCoreOp should be used only for network groups with a single core-op.
+    return m_core_op_name;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hcp_config_activated_core_op.hpp
+ * @brief Represent activated core-op from HEF.
+ *
+ * This core-op can be used for control-core-op only (for Ethernet or PCIe)
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_
+#define _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_
+
+#include "hailo/device.hpp"
+
+#include "common/utils.hpp"
+
+#include "core_op/active_core_op_holder.hpp"
+
+#include <vector>
+#include <map>
+
+
+namespace hailort
+{
+
+// A single configuration write request: a data buffer to be written to a device address.
+struct WriteMemoryInfo
+{
+    uint32_t address;   // Target address on the device
+    Buffer data;        // Payload written at `address`
+};
+
+/**
+ * RAII wrapper for an activated HCP-configured core-op: activation happens in the
+ * (private) constructor, deactivation in the destructor. Move-only.
+ */
+class HcpConfigActivatedCoreOp : public ActivatedCoreOp
+{
+  public:
+    static Expected<HcpConfigActivatedCoreOp> create(Device &device, std::vector<WriteMemoryInfo> &config,
+        const std::string &core_op_name,
+        const hailo_activate_network_group_params_t &network_group_params,
+        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        ActiveCoreOpHolder &active_core_op_holder,
+        hailo_power_mode_t power_mode, EventPtr core_op_activated_event,
+        CoreOp &core_op);
+
+    virtual ~HcpConfigActivatedCoreOp();
+    HcpConfigActivatedCoreOp(const HcpConfigActivatedCoreOp &) = delete;
+    HcpConfigActivatedCoreOp &operator=(const HcpConfigActivatedCoreOp &) = delete;
+    HcpConfigActivatedCoreOp &operator=(HcpConfigActivatedCoreOp &&) = delete;
+    // Move ctor transfers deactivation responsibility: the moved-from object's
+    // m_is_active is cleared so its destructor becomes a no-op.
+    HcpConfigActivatedCoreOp(HcpConfigActivatedCoreOp &&other) noexcept :
+        ActivatedCoreOp(std::move(other)), m_active_core_op_holder(other.m_active_core_op_holder),
+        m_is_active(std::exchange(other.m_is_active, false)), m_power_mode(other.m_power_mode),
+        m_device(other.m_device), m_core_op_name(std::move(other.m_core_op_name)) {};
+
+    virtual const std::string &get_network_group_name() const override;
+
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &/*key*/) override
+    {
+        LOGGER__ERROR("get_intermediate_buffer() is not supported on single_context core_ops");
+        return make_unexpected(HAILO_INVALID_OPERATION);
+    }
+
+    virtual hailo_status set_keep_nn_config_during_reset(const bool /* keep_nn_config_during_reset */) override
+    {
+        LOGGER__ERROR("set_keep_nn_config_during_reset() is not supported on single_context core_ops");
+        return HAILO_INVALID_OPERATION;
+    }
+
+  private:
+    HcpConfigActivatedCoreOp(Device &device, ActiveCoreOpHolder &active_core_op_holder,
+        const std::string &core_op_name,
+        const hailo_activate_network_group_params_t &network_group_params,
+        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        hailo_power_mode_t power_mode, EventPtr &&core_op_activated_event,
+        CoreOp &core_op, hailo_status &status);
+
+    ActiveCoreOpHolder &m_active_core_op_holder;
+    bool m_is_active;            // False once moved-from; guards double-deactivation
+    hailo_power_mode_t m_power_mode;
+    Device &m_device;
+    std::string m_core_op_name;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_ */
--- /dev/null
+#include "eth/hcp_config_core_op.hpp"
+#include "device_common/control.hpp"
+
+
+#define OUTPUT_CHANNEL_INDEX_OFFSET (16)
+
+
+namespace hailort
+{
+
+// Constructor: takes ownership of the configuration write-list; base CoreOp reports
+// initialization failures through the out-param `status`.
+HcpConfigCoreOp::HcpConfigCoreOp(Device &device, ActiveCoreOpHolder &active_core_op_holder,
+    std::vector<WriteMemoryInfo> &&config, const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata,
+    hailo_status &status)
+        : CoreOp(config_params, metadata, status),
+    m_config(std::move(config)), m_active_core_op_holder(active_core_op_holder), m_device(device)
+{}
+
+// Creates (and thereby activates) an activated network group for this core-op.
+// dynamic_batch_size / resume_pending_stream_transfers are ignored for HCP core-ops.
+Expected<std::unique_ptr<ActivatedNetworkGroup>> HcpConfigCoreOp::create_activated_network_group(
+    const hailo_activate_network_group_params_t &network_group_params, uint16_t /* dynamic_batch_size */,
+    bool /* resume_pending_stream_transfers */)
+{
+    // Measured only for the INFO log below.
+    auto start_time = std::chrono::steady_clock::now();
+
+    auto activated_net_group = HcpConfigActivatedCoreOp::create(m_device, m_config, name(), network_group_params,
+        m_input_streams, m_output_streams, m_active_core_op_holder, m_config_params.power_mode,
+        m_core_op_activated_event, (*this));
+    CHECK_EXPECTED(activated_net_group);
+
+    std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr = make_unique_nothrow<HcpConfigActivatedCoreOp>(activated_net_group.release());
+    CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
+    LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and thus the network is not fully activated yet.", name(), elapsed_time_ms);
+
+    return activated_net_group_ptr;
+}
+
+// Delegates the choice of default stream interface to the owning device.
+Expected<hailo_stream_interface_t> HcpConfigCoreOp::get_default_streams_interface()
+{
+    return m_device.get_default_streams_interface();
+}
+
+// HCP-configured core-ops always run without the scheduler.
+bool HcpConfigCoreOp::is_scheduled() const
+{
+    // Scheduler not supported on HcpConfigCoreOp
+    return false;
+}
+
+// Scheduler APIs are not supported for HCP-configured core-ops.
+hailo_status HcpConfigCoreOp::set_scheduler_timeout(const std::chrono::milliseconds &/*timeout*/, const std::string &/*network_name*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+// Scheduler APIs are not supported for HCP-configured core-ops.
+hailo_status HcpConfigCoreOp::set_scheduler_threshold(uint32_t /*threshold*/, const std::string &/*network_name*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+// Scheduler APIs are not supported for HCP-configured core-ops.
+hailo_status HcpConfigCoreOp::set_scheduler_priority(uint8_t /*priority*/, const std::string &/*network_name*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+// HCP core-ops do not support latency measurement; an empty map signals "no meters".
+Expected<std::shared_ptr<LatencyMetersMap>> HcpConfigCoreOp::get_latency_meters()
+{
+    /* hcp does not support latency. return empty map */
+    LatencyMetersMap empty_map;
+    return make_shared_nothrow<LatencyMetersMap>(empty_map);
+}
+
+// vDMA boundary channels exist only on PCIe/vDMA flows, never on ETH core-ops.
+Expected<vdma::BoundaryChannelPtr> HcpConfigCoreOp::get_boundary_vdma_channel_by_stream_name(
+    const std::string &stream_name)
+{
+    LOGGER__ERROR("get_boundary_vdma_channel_by_stream_name function for stream name {} is not supported on ETH core-ops",
+        stream_name);
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+// Marks this core-op as the active one, activates its low-level streams, and signals
+// the activation event so waiting readers/writers may proceed.
+hailo_status HcpConfigCoreOp::activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    m_active_core_op_holder.set(*this);
+
+    auto status = activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
+    CHECK_SUCCESS(status, "Failed activating low level streams");
+
+    status = m_core_op_activated_event->signal();
+    CHECK_SUCCESS(status, "Failed to signal network activation event");
+
+    return HAILO_SUCCESS;
+}
+// Deactivates this core-op: clears the active-holder, resets the activation event,
+// flushes all input streams, and deactivates the low-level streams.
+// keep_nn_config_during_reset is ignored for HCP core-ops.
+hailo_status HcpConfigCoreOp::deactivate_impl(bool /* keep_nn_config_during_reset */)
+{
+    auto expected_core_op_ref = m_active_core_op_holder.get();
+    CHECK(expected_core_op_ref.has_value(), HAILO_INTERNAL_FAILURE, "Error getting configured core-op");
+
+    const auto &core_op = expected_core_op_ref.value();
+    // Make sure the core-op we are deactivating is this object
+    CHECK(this == std::addressof(core_op.get()), HAILO_INTERNAL_FAILURE,
+        "Trying to deactivate different core-op");
+
+    m_active_core_op_holder.clear();
+
+    // Without an activation event there is nothing left to tear down.
+    if (!m_core_op_activated_event) {
+        return HAILO_SUCCESS;
+    }
+
+    m_core_op_activated_event->reset();
+
+    // Flush pending input data before the streams are deactivated.
+    for (auto &name_pair : m_input_streams) {
+        const auto status = name_pair.second->flush();
+        CHECK_SUCCESS(status, "Failed to flush input stream {}", name_pair.first);
+    }
+
+    auto status = deactivate_low_level_streams();
+    CHECK_SUCCESS(status, "Failed deactivating low level streams");
+
+    return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hcp_config_core_op.hpp
+ * @brief Represent core-op from HEF file that can be activated
+ *
+ * This core-op can be used for control-core-op (for Ethernet or PCIe)
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_HCP_CONFIG_CORE_OP_HPP_
+#define _HAILO_CONTEXT_SWITCH_HCP_CONFIG_CORE_OP_HPP_
+
+#include "hailo/device.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/utils.hpp"
+
+#include "eth/hcp_config_activated_core_op.hpp"
+#include "core_op/active_core_op_holder.hpp"
+#include "core_op/core_op.hpp"
+
+#include <vector>
+#include <map>
+
+
+namespace hailort
+{
+
+/**
+ * A core-op configured via plain HCP memory writes (single-context), rather than a
+ * context-switch configuration. Scheduler APIs are unsupported and return
+ * HAILO_INVALID_OPERATION. Move-only.
+ */
+class HcpConfigCoreOp : public CoreOp
+{
+public:
+    HcpConfigCoreOp(
+        Device &device, ActiveCoreOpHolder &active_core_op_holder, std::vector<WriteMemoryInfo> &&config,
+        const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
+
+    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
+        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
+        bool resume_pending_stream_transfers) override;
+    virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
+
+    virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
+    virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(
+        const std::string &stream_name) override;
+    virtual bool is_scheduled() const override;
+    virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
+    virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
+    virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
+
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset) override;
+
+    virtual ~HcpConfigCoreOp() = default;
+    HcpConfigCoreOp(const HcpConfigCoreOp &other) = delete;
+    HcpConfigCoreOp &operator=(const HcpConfigCoreOp &other) = delete;
+    HcpConfigCoreOp &operator=(HcpConfigCoreOp &&other) = delete;
+    HcpConfigCoreOp(HcpConfigCoreOp &&other) noexcept : CoreOp(std::move(other)),
+        m_config(std::move(other.m_config)), m_active_core_op_holder(other.m_active_core_op_holder),
+        m_device(other.m_device) {}
+
+private:
+    std::vector<WriteMemoryInfo> m_config;   // Memory writes applied on activation
+    ActiveCoreOpHolder &m_active_core_op_holder;
+    Device &m_device;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_HCP_CONFIG_CORE_OP_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file network_rate_calculator.cpp
+ * @brief: Network rate calculator
+ **/
+
+
+#include "hailo/hailort.h"
+#include "hailo/network_rate_calculator.hpp"
+
+#include "common/utils.hpp"
+
+#include "eth/eth_stream.hpp"
+
+#include <numeric>
+#include <algorithm>
+
+
+namespace hailort
+{
+
+// Retrieves all stream infos of `network_group_name` from the HEF.
+// Fails with HAILO_INVALID_HEF unless there are at least two streams.
+Expected<StreamInfoVector> NetworkUdpRateCalculator::get_streams_from_hef(Hef* hef, const std::string &network_group_name)
+{
+    assert(nullptr != hef);
+
+    auto all_streams_infos = hef->get_all_stream_infos(network_group_name);
+    CHECK_EXPECTED(all_streams_infos);
+
+    // We expect to have two or more streams (at least one for input and one for output)
+    if (all_streams_infos->size() < 2) {
+        return make_unexpected(HAILO_INVALID_HEF);
+    }
+
+    return all_streams_infos;
+}
+
+// Constructor: stores per-stream frame sizes (stream name -> hw frame size in bytes).
+NetworkUdpRateCalculator::NetworkUdpRateCalculator(std::map<std::string, uint32_t> &&input_edge_shapes,
+    std::map<std::string, uint32_t> &&output_edge_shapes) :
+        m_input_edge_shapes(std::move(input_edge_shapes)),
+        m_output_edge_shapes(std::move(output_edge_shapes)) {}
+
+// Builds a rate calculator from a HEF's stream infos (name -> hw frame size), split
+// by stream direction.
+Expected<NetworkUdpRateCalculator> NetworkUdpRateCalculator::create(Hef* hef, const std::string &network_group_name)
+{
+    if (hef == nullptr) {
+        return make_unexpected(HAILO_INVALID_ARGUMENT);
+    }
+    const auto stream_infos = get_streams_from_hef(hef, network_group_name);
+    if (!stream_infos) {
+        return make_unexpected(stream_infos.status());
+    }
+
+    // Working with HEF for rate_calcs assumes that all streams are udp streams
+    std::map<std::string, uint32_t> input_udp_edge_shapes;
+    std::map<std::string, uint32_t> output_udp_edge_shapes;
+    for (auto &info : stream_infos.value()) {
+        if (HAILO_H2D_STREAM == info.direction) {
+            input_udp_edge_shapes.insert(std::make_pair(info.name, info.hw_frame_size));
+        } else if (HAILO_D2H_STREAM == info.direction) {
+            output_udp_edge_shapes.insert(std::make_pair(info.name, info.hw_frame_size));
+        } else {
+            LOGGER__ERROR("Invalid stream direction for stream {}.", info.name);
+            return make_unexpected(HAILO_INTERNAL_FAILURE);
+        }
+    }
+
+    return NetworkUdpRateCalculator(std::move(input_udp_edge_shapes), std::move(output_udp_edge_shapes));
+}
+
+// Builds a rate calculator from a configured network group's ETH streams
+// (stream name -> hw frame size).
+// NOTE(review): only the input-stream list is checked for emptiness; an empty
+// ETH output-stream list is accepted as-is - confirm this asymmetry is intended.
+Expected<NetworkUdpRateCalculator> NetworkUdpRateCalculator::create(ConfiguredNetworkGroup &net_group)
+{
+    auto udp_input_streams = net_group.get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
+    CHECK_AS_EXPECTED(!udp_input_streams.empty(), HAILO_INVALID_OPERATION,
+        "There are no udp input streams in this network_group.");
+    auto udp_output_streams = net_group.get_output_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
+
+    std::map<std::string, uint32_t> input_udp_edge_shapes;
+    for (const auto &stream : udp_input_streams) {
+        input_udp_edge_shapes.insert(std::make_pair(stream.get().name(),
+            stream.get().get_info().hw_frame_size));
+    }
+    std::map<std::string, uint32_t> output_udp_edge_shapes;
+    for (const auto &stream : udp_output_streams) {
+        output_udp_edge_shapes.insert(std::make_pair(stream.get().name(),
+            stream.get().get_info().hw_frame_size));
+    }
+
+    return NetworkUdpRateCalculator(std::move(input_udp_edge_shapes), std::move(output_udp_edge_shapes));
+}
+
+/**
+ * Calculates the per-input-stream bandwidth (bytes/sec) needed for the requested FPS,
+ * scaling all input rates down if the total input or output bandwidth exceeds
+ * max_supported_bandwidth.
+ *
+ * @param fps                      Requested frames per second (clamped to at least 1).
+ * @param max_supported_bandwidth  Upper bound on total bandwidth, in bytes per second.
+ * @return Map of input stream name -> rate in bytes per second.
+ */
+Expected<std::map<std::string, uint32_t>> NetworkUdpRateCalculator::calculate_inputs_bandwith(uint32_t fps,
+    uint32_t max_supported_bandwidth)
+{
+    if (1 > fps) {
+        fps = 1;
+        LOGGER__WARNING("FPS for rate calculations cannot be smaller than 1. calculating rate_limiter with fps=1.");
+    }
+
+    // Per-edge rate = fps * hw frame size (bytes/sec).
+    std::map<std::string, uint32_t> input_rates;
+    std::transform(m_input_edge_shapes.begin(), m_input_edge_shapes.end(), std::inserter(input_rates, input_rates.end()),
+        [fps](auto &input_edge_pair) { return std::make_pair(input_edge_pair.first, (fps * input_edge_pair.second)); });
+
+    std::map<std::string, uint32_t> output_rates = {};
+    std::transform(m_output_edge_shapes.begin(), m_output_edge_shapes.end(), std::inserter(output_rates, output_rates.end()),
+        [fps](auto &output_edge_pair) { return std::make_pair(output_edge_pair.first, (fps * output_edge_pair.second)); });
+
+    uint32_t total_input_rate = std::accumulate(input_rates.begin(), input_rates.end(), 0,
+        [](int value, const auto &p) { return value + p.second; });
+    uint32_t total_output_rate = std::accumulate(output_rates.begin(), output_rates.end(), 0,
+        [](int value, const auto &p) { return value + p.second; });
+
+    if ((total_input_rate > max_supported_bandwidth) || (total_output_rate > max_supported_bandwidth)) {
+        LOGGER__WARNING("Requested rate (input: {} Bps, output: {} Bps) is high and might be unstable. Setting rate to {}.",
+            total_input_rate, total_output_rate, max_supported_bandwidth);
+        if (total_output_rate > total_input_rate) {
+            // Output is bigger than max rate. Adjusting input rate accordingly.
+            // Bug fix: this ratio was computed with integer division; since
+            // total_input_rate < total_output_rate in this branch, it always evaluated
+            // to 0 and zeroed max_supported_bandwidth (and with it every input rate).
+            auto input_output_ratio = (static_cast<float64_t>(total_input_rate) / total_output_rate);
+            LOGGER__WARNING("Output Bps ({}) is bigger than input Bps ({}) output (ratio is: {})", total_output_rate,
+                total_input_rate, input_output_ratio);
+            max_supported_bandwidth = static_cast<uint32_t>(max_supported_bandwidth * input_output_ratio);
+        }
+        // Scale every input stream by the same factor so their relative shares are kept.
+        auto total_inputs_rate_to_max_supported_ratio = (static_cast<float64_t>(max_supported_bandwidth) / total_input_rate);
+        for (auto &rate_pair : input_rates) {
+            auto rate = rate_pair.second * total_inputs_rate_to_max_supported_ratio;
+            rate_pair.second = static_cast<uint32_t>(rate);
+        }
+    }
+
+    return input_rates;
+}
+
+// Maps each input stream's remote UDP port to its calculated bandwidth (bytes/sec).
+// Assumes every stream in `udp_input_streams` is in fact an EthernetInputStream
+// (hence the reinterpret_cast) - callers must pass ETH streams only.
+Expected<std::map<uint16_t, uint32_t>> NetworkUdpRateCalculator::get_udp_ports_rates_dict(
+    std::vector<std::reference_wrapper<InputStream>> &udp_input_streams, uint32_t fps, uint32_t max_supported_bandwidth)
+{
+    auto rates_per_name = calculate_inputs_bandwith(fps, max_supported_bandwidth);
+    CHECK_EXPECTED(rates_per_name);
+
+    std::map<uint16_t, uint32_t> results = {};
+    for (const auto &input_stream : udp_input_streams) {
+        uint16_t remote_port = 0;
+        remote_port = reinterpret_cast<EthernetInputStream*>(&(input_stream.get()))->get_remote_port();
+        results.insert(std::make_pair(remote_port,
+            rates_per_name->at(input_stream.get().name())));
+    }
+
+    return results;
+}
+
+} /* namespace hailort */
--- /dev/null
+// Note:
+// * This module is taken from Facebook's open source Folly library: https://github.com/facebook/folly (v2020.08.17.00)
+// * Changes:
+// * Changes made to the module are delimited with "BEGIN/END HAILO CHANGES"
+// * The file has been renamed from "TokenBucket.h" to "token_bucket.hpp"
+// * Removed:
+// * folly namespace
+// * BasicTokenBucket
+// * From BasicDynamicTokenBucket:
+// * Copy ctor and assignment operator
+// * available()
+// * reset()
+// * Original file: https://github.com/facebook/folly/blob/v2020.08.17.00/folly/TokenBucket.h
+// * Copyright notices follow.
+
+/*
+ * Copyright (c) Facebook, Inc. and its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TOKEN_BUCKET_HPP_
+#define TOKEN_BUCKET_HPP_
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <thread>
+
+// BEGIN HAILO CHANGES
+#include <hailo/hailort.h>
+#include "hailo/expected.hpp"
+#include "os/microsec_timer.hpp"
+
+namespace hailort
+{
+// END HAILO CHANGES
+
+/**
+ * Thread-safe (atomic) token bucket implementation.
+ *
+ * A token bucket (http://en.wikipedia.org/wiki/Token_bucket) models a stream
+ * of events with an average rate and some amount of burstiness. The canonical
+ * example is a packet switched network: the network can accept some number of
+ * bytes per second and the bytes come in finite packets (bursts). A token
+ * bucket stores up to a fixed number of tokens (the burst size). Some number
+ * of tokens are removed when an event occurs. The tokens are replenished at a
+ * fixed rate. Failure to allocate tokens implies resource is unavailable and
+ * caller needs to implement its own retry mechanism. For simple cases where
+ * caller is okay with a FIFO starvation-free scheduling behavior, there are
+ * also APIs to 'borrow' from the future effectively assigning a start time to
+ * the caller when it should proceed with using the resource. It is also
+ * possible to 'return' previously allocated tokens to make them available to
+ * other users. Returns in excess of burstSize are considered expired and
+ * will not be available to later callers.
+ *
+ * This implementation records the last time it was updated. This allows the
+ * token bucket to add tokens "just in time" when tokens are requested.
+ *
+ * The "dynamic" base variant allows the token generation rate and maximum
+ * burst size to change with every token consumption.
+ *
+ * @tparam Clock Clock type, must be steady i.e. monotonic.
+ */
+template <typename Clock = std::chrono::steady_clock>
+class BasicDynamicTokenBucket {
+  static_assert(Clock::is_steady, "clock must be steady");
+
+ public:
+  /**
+   * Constructor.
+   *
+   * @param zeroTime Initial time at which to consider the token bucket
+   *                 starting to fill. Defaults to 0, so by default token
+   *                 buckets are "full" after construction.
+   */
+  explicit BasicDynamicTokenBucket(double zeroTime = 0) noexcept
+      : zeroTime_(zeroTime) {}
+
+  BasicDynamicTokenBucket(const BasicDynamicTokenBucket&) = delete;
+  BasicDynamicTokenBucket& operator=(const BasicDynamicTokenBucket&) = delete;
+
+  // BEGIN HAILO CHANGES
+  // Note: moving is not synchronized with concurrent users of `other`.
+  BasicDynamicTokenBucket(BasicDynamicTokenBucket&& other) :
+      zeroTime_(other.zeroTime_.load())
+  {}
+  // END HAILO CHANGES
+
+
+  /**
+   * Returns the current time in seconds since Epoch.
+   */
+  static double defaultClockNow() noexcept {
+    auto const now = Clock::now().time_since_epoch();
+    return std::chrono::duration<double>(now).count();
+  }
+
+  /**
+   * Attempts to consume some number of tokens. Tokens are first added to the
+   * bucket based on the time elapsed since the last attempt to consume tokens.
+   * Note: Attempts to consume more tokens than the burst size will always
+   * fail.
+   *
+   * Thread-safe.
+   *
+   * @param toConsume The number of tokens to consume.
+   * @param rate Number of tokens to generate per second.
+   * @param burstSize Maximum burst size. Must be greater than 0.
+   * @param nowInSeconds Current time in seconds. Should be monotonically
+   *                     increasing from the nowInSeconds specified in
+   *                     this token bucket's constructor.
+   * @return True if the rate limit check passed, false otherwise.
+   */
+  bool consume(
+      double toConsume,
+      double rate,
+      double burstSize,
+      double nowInSeconds = defaultClockNow()) {
+    assert(rate > 0);
+    assert(burstSize > 0);
+
+    if (nowInSeconds <= zeroTime_.load()) {
+      // Bucket is empty until zeroTime_; nothing can be consumed yet.
+      // (Was `return 0;` - this function returns bool.)
+      return false;
+    }
+
+    return consumeImpl(
+        rate, burstSize, nowInSeconds, [toConsume](double& tokens) {
+          if (tokens < toConsume) {
+            return false;
+          }
+          tokens -= toConsume;
+          return true;
+        });
+  }
+
+  /**
+   * Similar to consume, but always consumes some number of tokens. If the
+   * bucket contains enough tokens - consumes toConsume tokens. Otherwise the
+   * bucket is drained.
+   *
+   * Thread-safe.
+   *
+   * @param toConsume The number of tokens to consume.
+   * @param rate Number of tokens to generate per second.
+   * @param burstSize Maximum burst size. Must be greater than 0.
+   * @param nowInSeconds Current time in seconds. Should be monotonically
+   *                     increasing from the nowInSeconds specified in
+   *                     this token bucket's constructor.
+   * @return number of tokens that were consumed.
+   */
+  double consumeOrDrain(
+      double toConsume,
+      double rate,
+      double burstSize,
+      double nowInSeconds = defaultClockNow()) {
+    assert(rate > 0);
+    assert(burstSize > 0);
+
+    if (nowInSeconds <= zeroTime_.load()) {
+      return 0.0;
+    }
+
+    // `consumed` is always written by the callback before consumeImpl returns,
+    // since the callback unconditionally returns true.
+    double consumed;
+    consumeImpl(
+        rate, burstSize, nowInSeconds, [&consumed, toConsume](double& tokens) {
+          if (tokens < toConsume) {
+            consumed = tokens;
+            tokens = 0.0;
+          } else {
+            consumed = toConsume;
+            tokens -= toConsume;
+          }
+          return true;
+        });
+    return consumed;
+  }
+
+  /**
+   * Return extra tokens back to the bucket. This will move the zeroTime_
+   * value back based on the rate.
+   *
+   * Thread-safe.
+   */
+  void returnTokens(double tokensToReturn, double rate) {
+    assert(rate > 0);
+    assert(tokensToReturn > 0);
+
+    returnTokensImpl(tokensToReturn, rate);
+  }
+
+  // BEGIN HAILO CHANGES
+  /**
+   * Like consumeOrDrain but the call will always satisfy the asked for count.
+   * It does so by borrowing tokens from the future (zeroTime_ will move
+   * forward) if the currently available count isn't sufficient.
+   *
+   * Returns an Expected<double>. The Expected won't be set if the request
+   * cannot be satisfied: the only case is when it is larger than burstSize.
+   * The value of the Expected is a double indicating the time in seconds that
+   * the caller needs to wait at which the reservation becomes valid. The
+   * caller could simply sleep for the returned duration to smooth out the
+   * allocation to match the rate limiter or do some other computation in the
+   * meantime. In any case, any regular consume or consumeOrDrain calls will
+   * fail to allocate any tokens until the future time is reached.
+   *
+   * Note: It is assumed the caller will not ask for a very large count nor use
+   * it immediately (if not waiting inline) as that would break the burst
+   * prevention the limiter is meant to be used for.
+   *
+   * Thread-safe.
+   */
+  Expected<double> consumeWithBorrowNonBlocking(
+      double toConsume,
+      double rate,
+      double burstSize,
+      double nowInSeconds = defaultClockNow()) {
+    assert(rate > 0);
+    assert(burstSize > 0);
+
+    if (burstSize < toConsume) {
+      return make_unexpected(HAILO_INVALID_ARGUMENT);
+    }
+
+    while (toConsume > 0) {
+      double consumed =
+          consumeOrDrain(toConsume, rate, burstSize, nowInSeconds);
+      if (consumed > 0) {
+        toConsume -= consumed;
+      } else {
+        // Nothing available now - borrow the remainder from the future and
+        // report how long the caller must wait for the reservation.
+        double zeroTimeNew = returnTokensImpl(-toConsume, rate);
+        double napTime = std::max(0.0, zeroTimeNew - nowInSeconds);
+        return napTime;
+      }
+    }
+    return 0.0;
+  }
+
+  /**
+   * Convenience wrapper around non-blocking borrow to sleep inline until
+   * reservation is valid.
+   */
+  bool consumeWithBorrowAndWait(
+      double toConsume,
+      double rate,
+      double burstSize,
+      double nowInSeconds = defaultClockNow()) {
+    auto res = consumeWithBorrowNonBlocking(toConsume, rate, burstSize, nowInSeconds);
+    if (!res.has_value()) {
+      return false;
+    }
+    if (res.value() > 0) {
+      MicrosecTimer::sleep(static_cast<uint64_t>(res.value() * 1000000));
+    }
+    return true;
+  }
+  // END HAILO CHANGES
+
+ private:
+  template <typename TCallback>
+  bool consumeImpl(
+      double rate,
+      double burstSize,
+      double nowInSeconds,
+      const TCallback& callback) {
+    auto zeroTimeOld = zeroTime_.load();
+    double zeroTimeNew;
+    do {
+      auto tokens = std::min((nowInSeconds - zeroTimeOld) * rate, burstSize);
+      if (!callback(tokens)) {
+        return false;
+      }
+      zeroTimeNew = nowInSeconds - tokens / rate;
+    } while (!zeroTime_.compare_exchange_weak(zeroTimeOld, zeroTimeNew));
+
+    return true;
+  }
+
+  /**
+   * Adjust zeroTime based on rate and tokenCount and return the new value of
+   * zeroTime_. Note: Token count can be negative to move the zeroTime_ value
+   * into the future.
+   */
+  double returnTokensImpl(double tokenCount, double rate) {
+    auto zeroTimeOld = zeroTime_.load();
+    double zeroTimeNew;
+    do {
+      zeroTimeNew = zeroTimeOld - tokenCount / rate;
+    } while (!zeroTime_.compare_exchange_weak(zeroTimeOld, zeroTimeNew));
+    return zeroTimeNew;
+  }
+
+  std::atomic<double> zeroTime_;
+};
+
+using DynamicTokenBucket = BasicDynamicTokenBucket<>;
+
+} /* namespace hailort */
+
+#endif /* TOKEN_BUCKET_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file udp.cpp
+ * @brief Socket wrapper for Unix
+ **/
+
+#include "hailo/hailort.h"
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+#include "common/socket.hpp"
+#include "eth/udp.hpp"
+#include "device_common/control_protocol.hpp"
+
+#include <stdint.h>
+#include <errno.h>
+#include <string.h>
+
+
+namespace hailort
+{
+
+#define MILLISECONDS_IN_SECOND (1000)
+#define MICROSECONDS_IN_MILLISECOND (1000)
+
+// Scratch buffer for padded sends; zero-initialized so the leading padding bytes are always zero.
+uint8_t g_padded_buffer[MAX_UDP_PAYLOAD_SIZE] = {0,};
+
+hailo_status Udp::bind(struct in_addr host_ip, uint16_t host_port)
+{
+    // Fill in the local endpoint we want to bind to.
+    m_host_address.sin_family = AF_INET;
+    m_host_address.sin_port = htons(host_port);
+    m_host_address.sin_addr = host_ip;
+    m_host_address_length = sizeof(m_host_address);
+
+    // Bind the underlying socket to the requested host address.
+    const auto status = m_socket.socket_bind(reinterpret_cast<struct sockaddr*>(&m_host_address), m_host_address_length);
+    CHECK_SUCCESS(status);
+
+    // Re-read the bound address so any ephemeral port/address chosen by the OS
+    // is reflected back into m_host_address.
+    return m_socket.get_sock_name(reinterpret_cast<struct sockaddr*>(&m_host_address), &m_host_address_length);
+}
+
+Expected<Udp> Udp::create(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip,
+    uint16_t host_port)
+{
+    // Factory: builds the underlying UDP socket first, then constructs the Udp
+    // wrapper, whose constructor reports its result through the `status` out-param.
+    auto status = HAILO_UNINITIALIZED;
+    auto socket = Socket::create(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+    CHECK_EXPECTED(socket);
+    auto object = Udp(device_ip, device_port, host_ip, host_port, socket.release(), status);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return object;
+}
+
+Udp::Udp(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip, uint16_t host_port,
+    Socket &&socket, hailo_status &status) : m_socket(std::move(socket))
+{
+    // Initialization result is reported through the `status` out-param (no exceptions).
+    // On any failure we return early, leaving `status` holding the error.
+    m_device_address.sin_family = AF_INET;
+    m_device_address.sin_port = htons(device_port);
+    m_device_address.sin_addr = device_ip;
+    m_device_address_length = sizeof(m_device_address);
+
+    /* Adjust socket rcv buff size */
+    status = m_socket.set_recv_buffer_size_max();
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    /* Set default value timeout */
+    status = set_timeout(std::chrono::milliseconds(HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS));
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    /* Set default max number of retries */
+    status = set_max_number_of_attempts(HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES);
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    /* If device address is 255.255.255.255 (broadcast), enable broadcast */
+    if (INADDR_BROADCAST == m_device_address.sin_addr.s_addr) {
+        status = m_socket.enable_broadcast();
+        if (HAILO_SUCCESS != status) {
+            return;
+        }
+    }
+
+    /* Bind socket at the host */
+    status = bind(host_ip, host_port);
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+hailo_status Udp::set_timeout(const std::chrono::milliseconds timeout_ms)
+{
+    // Applies the timeout to the underlying socket and caches it in m_timeout.
+    return m_socket.set_timeout(timeout_ms, &(m_timeout));
+}
+
+// Sends up to *size bytes to the device address. On return, *size holds the number
+// of payload bytes actually sent (padding excluded). When use_padding is set, the
+// payload is copied into g_padded_buffer after PADDING_BYTES_SIZE leading zero bytes.
+// NOTE(review): g_padded_buffer is a shared global - concurrent padded sends from
+// multiple threads would race on it; confirm callers serialize access.
+hailo_status Udp::send(uint8_t *buffer, size_t *size, bool use_padding, size_t max_payload_size)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    size_t number_of_sent_bytes = 0;
+    uint8_t *send_ptr = buffer;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(buffer);
+    CHECK_ARG_NOT_NULL(size);
+
+    if (use_padding) {
+        // Clamp the payload so padding + alignment still fit within max_payload_size.
+        if (*size > (max_payload_size - PADDING_BYTES_SIZE - PADDING_ALIGN_BYTES)) {
+            *size = (max_payload_size - PADDING_BYTES_SIZE - PADDING_ALIGN_BYTES);
+        }
+        /*copy the data to the padded buffer and adjust the size*/
+        memcpy((g_padded_buffer + PADDING_BYTES_SIZE), buffer, *size);
+        send_ptr = g_padded_buffer;
+        *size += PADDING_BYTES_SIZE;
+    }
+    else if (*size > max_payload_size) {
+        *size = max_payload_size;
+    }
+
+    status = m_socket.send_to((const uint8_t*)send_ptr, *size, MSG_CONFIRM, (const struct sockaddr *) &m_device_address,
+        m_device_address_length, &number_of_sent_bytes);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Socket send_to was aborted!");
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    /*if we had to pad, omit the padding when returning the number of bytes*/
+    // NOTE(review): assumes at least PADDING_BYTES_SIZE bytes were sent; a shorter
+    // partial send would underflow here - confirm send_to reports whole-datagram sends.
+    if (use_padding) {
+        number_of_sent_bytes -= PADDING_BYTES_SIZE;
+    }
+
+    /* number_of_sent_bytes will be positive because of the validation above */
+    *size = (size_t)number_of_sent_bytes;
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status Udp::recv(uint8_t *buffer, size_t *size)
+{
+    // Receives a single datagram from the device address into `buffer`.
+    // On return, *size holds the number of bytes actually received.
+    CHECK_ARG_NOT_NULL(buffer);
+    CHECK_ARG_NOT_NULL(size);
+
+    // Never request more than a single UDP payload.
+    if (*size > MAX_UDP_PAYLOAD_SIZE) {
+        *size = MAX_UDP_PAYLOAD_SIZE;
+    }
+
+    size_t bytes_received = 0;
+    auto status = m_socket.recv_from(buffer, *size, 0, (struct sockaddr *) &m_device_address, m_device_address_length,
+        &bytes_received);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Socket recv_from was aborted!");
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    *size = bytes_received;
+    return HAILO_SUCCESS;
+}
+
+hailo_status Udp::abort()
+{
+    // Aborts any blocking operation on the underlying socket.
+    return m_socket.abort();
+}
+
+hailo_status Udp::has_data(bool log_timeouts_in_debug)
+{
+    // Polls the socket for pending data from the device address; forwards the flag
+    // controlling whether timeouts are logged at debug (rather than warning) level.
+    return m_socket.has_data((struct sockaddr *) &m_device_address, m_device_address_length, log_timeouts_in_debug);
+}
+
+// Receives firmware response packets until one carries `expected_sequence`,
+// discarding mismatched packets, up to m_max_number_of_attempts receives.
+hailo_status Udp::receive_fw_response(uint8_t *buffer, size_t *size, uint32_t expected_sequence)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+
+    size_t receive_attempts = 0;
+    uint32_t received_sequence = 0;
+
+    ASSERT(NULL != buffer);
+    ASSERT(NULL != size);
+
+    for (receive_attempts = 0; receive_attempts < m_max_number_of_attempts; receive_attempts++) {
+        /* Receive a single packet */
+        status = recv(buffer, size);
+        CHECK_SUCCESS(status);
+
+        /* Get the sequence from the buffer */
+        common_status = CONTROL_PROTOCOL__get_sequence_from_response_buffer(buffer, *size, &received_sequence);
+        status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+        CHECK_SUCCESS(status);
+
+        if (received_sequence == expected_sequence) {
+            /* Received the expected response */
+            break;
+        } else {
+            /* Invalid response was received */
+            LOGGER__WARNING("Invalid sequence received (received {}, expected {}). Discarding it.", received_sequence,
+                expected_sequence);
+            continue;
+        }
+    }
+    // If the loop ran to completion, every packet carried a wrong sequence.
+    CHECK((receive_attempts < m_max_number_of_attempts), HAILO_ETH_FAILURE,
+        "Received a response with an invalid sequence for {} time.", receive_attempts);
+
+    return HAILO_SUCCESS;
+}
+
+
+// Sends one control request and (optionally) waits for the matching response.
+// A *response_size of 0 means no response is expected, in which case a receive
+// timeout is treated as success.
+hailo_status Udp::fw_interact_impl(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
+    size_t *response_size, uint32_t expected_sequence)
+{
+    // Validate pointers BEFORE dereferencing response_size (previously
+    // *response_size was read before the NULL assertion).
+    ASSERT(NULL != request_buffer);
+    ASSERT(NULL != response_buffer);
+    ASSERT(NULL != response_size);
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    size_t expected_request_size = request_size;
+    /* If the response_size value is 0, we do not expect response from the fw */
+    bool expecting_response = (0 != *response_size);
+
+    status = send(request_buffer, &request_size, false, MAX_UDP_PAYLOAD_SIZE);
+    CHECK_SUCCESS(status);
+
+    /* Validate all bytes were actually sent */
+    CHECK(expected_request_size == request_size, HAILO_ETH_FAILURE,
+        "Did not send all data at UDP__fw_interact. Expected to send: {}, actually sent: {}", expected_request_size,
+        request_size);
+
+    status = receive_fw_response(response_buffer, response_size, expected_sequence);
+    if ((HAILO_TIMEOUT == status) && !expecting_response) {
+        // This timeout was predictable
+        status = HAILO_SUCCESS;
+    }
+    return status;
+}
+
+// Sends a control request with up to m_max_number_of_attempts retries on
+// transient transport failures (send/recv failure or timeout).
+hailo_status Udp::fw_interact(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
+    size_t *response_size, uint32_t expected_sequence)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    /* Validate arguments */
+    CHECK_ARG_NOT_NULL(request_buffer);
+    CHECK_ARG_NOT_NULL(response_buffer);
+    CHECK_ARG_NOT_NULL(response_size);
+
+    /* Not clearing the read socket before, because the FW ignores duplicated controls,
+       so a leftover control response in the read socket is not possible */
+
+    for (size_t attempt_number = 0; attempt_number < m_max_number_of_attempts; ++attempt_number) {
+        status = fw_interact_impl(request_buffer, request_size, response_buffer, response_size, expected_sequence);
+        if ((HAILO_ETH_RECV_FAILURE == status) || (HAILO_ETH_SEND_FAILURE == status) || (HAILO_TIMEOUT == status)) {
+            // Transient transport failure - retry the same control.
+            // (LOGGER__WARNING matches the macro used throughout this file.)
+            LOGGER__WARNING("Control response was not received, sending it again. Attempt number: {} (zero indexed)",
+                attempt_number);
+            continue;
+        }
+        CHECK_SUCCESS(status);
+        /* Not validating amount of received bytes because we can not know how many bytes are expected */
+        break;
+    }
+
+    // Propagate the last status: previously HAILO_SUCCESS was returned
+    // unconditionally, masking the failure when every attempt timed out.
+    // On the break path status is HAILO_SUCCESS; set_max_number_of_attempts
+    // guarantees at least one iteration runs.
+    return status;
+}
+
+// Sets the retry budget used by fw_interact()/receive_fw_response().
+// Rejects 0, so retry loops are guaranteed to run at least once.
+hailo_status Udp::set_max_number_of_attempts(uint8_t max_number_of_attempts)
+{
+    /* Validate arguments */
+    CHECK(0 < max_number_of_attempts, HAILO_INVALID_ARGUMENT,
+        "Invalid max_number_of_attempts attempt to be set. max_number_of_attempts cannot be 0.");
+
+    m_max_number_of_attempts = max_number_of_attempts;
+
+    return HAILO_SUCCESS;
+
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file udp.hpp
+ * @brief Defines udp transport method.
+ **/
+
+#ifndef __OS_UDP_H__
+#define __OS_UDP_H__
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/socket.hpp"
+
+
+namespace hailort
+{
+
+typedef struct sockaddr_in UDP__sockaddr_in_t;
+typedef struct timeval UDP__timeout_t;
+
+// RAII wrapper around a bound UDP socket used to talk to a Hailo device.
+// Construct via create(); initialization errors are surfaced through the
+// factory's Expected return value.
+class Udp final {
+public:
+    static Expected<Udp> create(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip,
+        uint16_t host_port);
+
+    hailo_status set_timeout(const std::chrono::milliseconds timeout_ms);
+    hailo_status send(uint8_t *buffer, size_t *size, bool use_padding, size_t max_payload_size);
+    hailo_status recv(uint8_t *buffer, size_t *size);
+    hailo_status abort();
+    hailo_status has_data(bool log_timeouts_in_debug = false);
+    hailo_status fw_interact(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
+        size_t *response_size, uint32_t expected_sequence);
+    hailo_status set_max_number_of_attempts(uint8_t max_number_of_attempts);
+
+    // Local (host-side) endpoint; updated after bind() with the actual bound address.
+    UDP__sockaddr_in_t m_host_address;
+    socklen_t m_host_address_length;
+    // Remote (device-side) endpoint all traffic is sent to / received from.
+    UDP__sockaddr_in_t m_device_address;
+    socklen_t m_device_address_length;
+    // Cached socket timeout, as last applied by set_timeout().
+    UDP__timeout_t m_timeout;
+
+private:
+    Udp(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip, uint16_t host_port,
+        Socket &&socket, hailo_status &status);
+
+    hailo_status bind(struct in_addr host_ip, uint16_t host_port);
+    hailo_status receive_fw_response(uint8_t *buffer, size_t *size, uint32_t expected_sequence);
+    hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
+        size_t *response_size, uint32_t expected_sequence);
+
+    // Retry budget for control interactions; guaranteed non-zero by the setter.
+    uint8_t m_max_number_of_attempts;
+    Socket m_socket;
+};
+
+} /* namespace hailort */
+
+#endif /* __OS_UDP_H__ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file eth_device.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "eth_device.hpp"
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-#include "hailo/device.hpp"
-#include "control.hpp"
-#include "udp.hpp"
-#include "common/ethernet_utils.hpp"
-#include "hailo/hef.hpp"
-
-#include <stdlib.h>
-#include <errno.h>
-#include <new>
-#include <array>
-
-namespace hailort
-{
-
-#define SCAN_SEQUENCE (0)
-#define WAIT_FOR_DEVICE_WAKEUP_MAX_ATTEMPTS (10)
-#define WAIT_FOR_DEVICE_WAKEUP_TIMEOUT (1000)
-#define ETH_BROADCAST_IP ("255.255.255.255")
-
-
-hailo_status EthernetDevice::fw_interact_impl(uint8_t *request_buffer, size_t request_size,
- uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id)
-{
- /* CPU id is used only in PCIe, for Eth all control goes to APP CPU.*/
- (void)cpu_id;
- return m_control_udp.fw_interact(request_buffer, request_size, response_buffer, response_size, m_control_sequence);
-}
-
-hailo_status EthernetDevice::wait_for_wakeup()
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
-
- /* Create udp socket */
- auto udp = Udp::create(m_device_info.device_address.sin_addr, m_device_info.device_address.sin_port,
- m_device_info.host_address.sin_addr, m_device_info.host_address.sin_port);
- CHECK_EXPECTED_AS_STATUS(udp);
-
- status = udp->set_timeout(std::chrono::milliseconds(WAIT_FOR_DEVICE_WAKEUP_TIMEOUT));
- CHECK_SUCCESS(status);
-
- status = udp->set_max_number_of_attempts(WAIT_FOR_DEVICE_WAKEUP_MAX_ATTEMPTS);
- CHECK_SUCCESS(status);
-
- /* Create and send identify-control until it runs successfully */
- common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, m_control_sequence);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- status = udp->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size,
- m_control_sequence);
-
- // Always increment sequence
- m_control_sequence = (m_control_sequence + 1) % CONTROL__MAX_SEQUENCE;
- CHECK_SUCCESS(status);
-
- /* Parse and validate the response */
- return Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header, &payload, &request);
-}
-
-Expected<std::unique_ptr<EthernetDevice>> EthernetDevice::create(const hailo_eth_device_info_t &device_info)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- // Creates control socket
- auto udp = Udp::create(device_info.device_address.sin_addr, device_info.device_address.sin_port,
- device_info.host_address.sin_addr, device_info.host_address.sin_port);
- CHECK_EXPECTED(udp, "Failed to init control socket.");
-
- auto device = std::unique_ptr<EthernetDevice>(new (std::nothrow) EthernetDevice(device_info, udp.release(), status));
- CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
-
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed creating EthernetDevice");
- return make_unexpected(status);
- }
-
- return device;
-}
-
-Expected<std::unique_ptr<EthernetDevice>> EthernetDevice::create(const std::string &ip_addr)
-{
- const bool LOG_ON_FAILURE = true;
- auto device_info = parse_eth_device_info(ip_addr, LOG_ON_FAILURE);
- CHECK_EXPECTED(device_info, "Failed to parse ip address {}", ip_addr);
- return create(device_info.release());
-}
-
-EthernetDevice::EthernetDevice(const hailo_eth_device_info_t &device_info, Udp &&control_udp, hailo_status &status) :
- DeviceBase::DeviceBase(Device::Type::ETH),
- m_device_info(device_info),
- m_control_udp(std::move(control_udp))
-{
- char ip_buffer[INET_ADDRSTRLEN];
- status = Socket::ntop(AF_INET, &(device_info.device_address.sin_addr), ip_buffer, INET_ADDRSTRLEN);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Socket::ntop() failed with status {}", status);
- return;
- }
- m_device_id = std::string(ip_buffer);
-
- status = m_control_udp.set_timeout(std::chrono::milliseconds(m_device_info.timeout_millis));
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to init set timeout for control socket.");
- return;
- }
-
- status = m_control_udp.set_max_number_of_attempts(m_device_info.max_number_of_attempts);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to init set max_number_of_attempts for control socket.");
- return;
- }
-
- status = update_fw_state();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("update_fw_state() failed with status {}", status);
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-Expected<size_t> EthernetDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
-{
- (void) buffer;
- (void) cpu_id;
- return make_unexpected(HAILO_NOT_IMPLEMENTED);
-}
-
-static void eth_device__fill_eth_device_info(Udp &udp, hailo_eth_device_info_t *eth_device_info)
-{
- eth_device_info->device_address.sin_family = AF_INET;
- eth_device_info->device_address.sin_addr = udp.m_device_address.sin_addr;
- eth_device_info->device_address.sin_port = HAILO_DEFAULT_ETH_CONTROL_PORT;
-
- eth_device_info->host_address.sin_family = AF_INET;
- eth_device_info->host_address.sin_addr.s_addr = INADDR_ANY;
- eth_device_info->host_address.sin_port = HAILO_ETH_PORT_ANY;
-
- eth_device_info->max_number_of_attempts = HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES;
- eth_device_info->max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
- eth_device_info->timeout_millis = HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS;
-
- char textual_ip_address[INET_ADDRSTRLEN];
- auto inet = inet_ntop(AF_INET, &(udp.m_device_address.sin_addr), textual_ip_address, INET_ADDRSTRLEN);
- if (NULL != inet) {
- LOGGER__DEBUG("Found Hailo device: {}", textual_ip_address);
- }
-}
-
-static Expected<hailo_eth_device_info_t> eth_device__handle_available_data(Udp &udp)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- /* Try to receive data from the udp socket and log timeouts in debug level */
- status = udp.has_data(true);
- if (HAILO_TIMEOUT == status) {
- LOGGER__DEBUG("Scan timeout");
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- hailo_eth_device_info_t device_info{};
- eth_device__fill_eth_device_info(udp, &device_info);
-
- return device_info;
-}
-
-static Expected<std::vector<hailo_eth_device_info_t>> eth_device__receive_responses(Udp &udp)
-{
- std::vector<hailo_eth_device_info_t> results;
- while (true) {
- auto next_device_info = eth_device__handle_available_data(udp);
- if (next_device_info.has_value()) {
- results.emplace_back(next_device_info.release());
- } else if (HAILO_TIMEOUT == next_device_info.status()) {
- // We excpect to stop receiving data due to timeout
- break;
- } else {
- // Any other reason indicates a problem
- return make_unexpected(next_device_info.status());
- }
- }
-
- return results;
-}
-
-Expected<std::vector<hailo_eth_device_info_t>> EthernetDevice::scan(const std::string &interface_name,
- std::chrono::milliseconds timeout)
-{
- // Convert interface name to IP address
- std::array<char, IPV4_STRING_MAX_LENGTH> interface_ip_address{};
- auto status = EthernetUtils::get_ip_from_interface(interface_name.c_str(), interface_ip_address.data(), interface_ip_address.size());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return scan_by_host_address(interface_ip_address.data(), timeout);
-}
-
-hailo_status get_udp_broadcast_params(const char *host_address, struct in_addr &interface_ip_address,
- struct in_addr &broadcast_ip_address)
-{
- assert(nullptr != host_address);
-
- auto status = Socket::pton(AF_INET, host_address, &interface_ip_address);
- CHECK_SUCCESS(status, "Invalid host ip address {}", host_address);
- status = Socket::pton(AF_INET, ETH_BROADCAST_IP, &broadcast_ip_address);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::vector<hailo_eth_device_info_t>> EthernetDevice::scan_by_host_address(const std::string &host_address,
- std::chrono::milliseconds timeout)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request{};
- size_t request_size = 0;
- uint32_t sequence = SCAN_SEQUENCE;
- struct in_addr broadcast_ip_address{};
- struct in_addr interface_ip_address{};
-
- status = get_udp_broadcast_params(host_address.c_str(), interface_ip_address, broadcast_ip_address);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Create broadcast udp object */
- auto udp_broadcast = Udp::create(broadcast_ip_address, HAILO_DEFAULT_ETH_CONTROL_PORT, interface_ip_address, 0);
- CHECK_EXPECTED(udp_broadcast);
- status = udp_broadcast->set_timeout(timeout);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Build identify request */
- common_status = CONTROL_PROTOCOL__pack_identify_request(&request, &request_size, sequence);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Send broadcast identify request */
- status = udp_broadcast->send((uint8_t *)&request, &request_size, false, MAX_UDP_PAYLOAD_SIZE);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Receive all responses */
- return eth_device__receive_responses(*udp_broadcast);
-}
-
-Expected<hailo_eth_device_info_t> EthernetDevice::parse_eth_device_info(const std::string &ip_addr,
- bool log_on_failure)
-{
- hailo_eth_device_info_t device_info{};
-
- device_info.host_address.sin_family = AF_INET;
- device_info.host_address.sin_port = HAILO_ETH_PORT_ANY;
-
- auto status = Socket::pton(AF_INET, HAILO_ETH_ADDRESS_ANY, &(device_info.host_address.sin_addr));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- device_info.device_address.sin_family = AF_INET;
- device_info.device_address.sin_port = HAILO_DEFAULT_ETH_CONTROL_PORT;
- status = Socket::pton(AF_INET, ip_addr.c_str(), &(device_info.device_address.sin_addr));
- if (status != HAILO_SUCCESS) {
- if (log_on_failure) {
- LOGGER__ERROR("Invalid ip address {}", ip_addr);
- }
- return make_unexpected(status);
- }
-
- device_info.timeout_millis = HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS;
- device_info.max_number_of_attempts = HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES;
- device_info.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
-
- return device_info;
-}
-
-void EthernetDevice::increment_control_sequence()
-{
- m_control_sequence = (m_control_sequence + 1) % CONTROL__MAX_SEQUENCE;
-}
-
-hailo_reset_device_mode_t EthernetDevice::get_default_reset_mode()
-{
- return HAILO_RESET_DEVICE_MODE_CHIP;
-}
-
-hailo_status EthernetDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- bool is_expecting_response = true;
-
- switch (reset_type) {
- case CONTROL_PROTOCOL__RESET_TYPE__CHIP:
- is_expecting_response = false;
- break;
- case CONTROL_PROTOCOL__RESET_TYPE__SOFT:
- /* Fallthrough */
- case CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT:
- is_expecting_response = false; // TODO: Check boot source, set is_expecting_response = (boot_source != pcie)
- break;
- default:
- is_expecting_response = true;
- break;
- }
-
- common_status = CONTROL_PROTOCOL__pack_reset_request(&request, &request_size, m_control_sequence, reset_type);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- /* On non-reponse controls we set the response_size to 0 */
- if (!is_expecting_response) {
- response_size = 0;
- }
-
- LOGGER__DEBUG("Sending reset request");
- status = this->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- // fw_interact should return success even if response is not expected
- CHECK_SUCCESS(status);
-
- /* Parse response if expected */
- // TODO: fix logic with respect to is_expecting_response
- if (0 != response_size) {
- status = Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header,
- &payload, &request);
- CHECK_SUCCESS(status);
- CHECK(is_expecting_response, HAILO_INTERNAL_FAILURE,
- "Recived valid response from FW for control who is not expecting one.");
- } else {
- status = this->wait_for_wakeup();
- CHECK_SUCCESS(status);
- }
-
- LOGGER__DEBUG("Board has been reset successfully");
- return HAILO_SUCCESS;
-}
-
-Expected<hailo_device_architecture_t> EthernetDevice::get_architecture() const
-{
- // FW is always up if we got here (EthernetDevice's ctor would fail otherwise)
- // Hence, just return it
- return Expected<hailo_device_architecture_t>(m_device_architecture);
-}
-
-hailo_eth_device_info_t EthernetDevice::get_device_info() const
-{
- return m_device_info;
-}
-
-const char *EthernetDevice::get_dev_id() const
-{
- return m_device_id.c_str();
-}
-
-Expected<D2H_EVENT_MESSAGE_t> EthernetDevice::read_notification()
-{
- return make_unexpected(HAILO_NOT_IMPLEMENTED);
-}
-
-hailo_status EthernetDevice::disable_notifications()
-{
- return HAILO_NOT_IMPLEMENTED;
-}
-
-Expected<ConfiguredNetworkGroupVector> EthernetDevice::add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params)
-{
- auto device_arch_exp = get_architecture();
- CHECK_EXPECTED(device_arch_exp);
- auto device_arch = device_arch_exp.release();
-
- auto partial_clusters_layout_bitmap_exp = Control::get_partial_clusters_layout_bitmap(*this);
- CHECK_EXPECTED(partial_clusters_layout_bitmap_exp);
- auto partial_clusters_layout_bitmap = partial_clusters_layout_bitmap_exp.release();
-
- auto &hef_network_groups = hef.pimpl->network_groups();
- ConfiguredNetworkGroupVector added_network_groups;
- // TODO: can be optimized (add another loop the allocate the network group we're adding)
- added_network_groups.reserve(hef_network_groups.size());
- auto configure_params_copy = configure_params;
- auto hef_arch = hef.pimpl->get_device_arch();
-
- // Reset FW state_machine status - can be removed?
- static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
- auto status = Control::reset_context_switch_state_machine(*this, REMOVE_NN_CONFIG_DURING_RESET);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- for (const auto &base_network_group_proto : hef_network_groups) {
- const std::string &network_group_name = base_network_group_proto->network_group_metadata().network_group_name();
- auto &hef_core_ops = hef.pimpl->core_ops(network_group_name);
- assert(hef_core_ops.size() == 1);
- const auto &core_op = hef_core_ops[0];
-
- auto expected_partial_core_op = Hef::Impl::get_core_op_per_arch(core_op, hef_arch, device_arch,
- partial_clusters_layout_bitmap);
- CHECK_EXPECTED(expected_partial_core_op);
- auto partial_core_op = expected_partial_core_op.release();
-
- // TODO: decide about core_op names - align with the Compiler
-
- /* If NG params are present, use them
- If no configure params are given, use default*/
- ConfigureNetworkParams config_params{};
- if (contains(configure_params, network_group_name)) {
- config_params = configure_params_copy.at(network_group_name);
- configure_params_copy.erase(network_group_name);
- } else if (configure_params.empty()) {
- auto interface = get_default_streams_interface();
- CHECK_EXPECTED(interface);
- auto config_params_exp = hef.create_configure_params(interface.value(), network_group_name);
- CHECK_EXPECTED(config_params_exp);
- config_params = config_params_exp.release();
- } else {
- continue;
- }
-
- /* Validate that all network_groups are single context */
- CHECK(1 == partial_core_op->contexts.size(), make_unexpected(HAILO_INTERNAL_FAILURE),
- "Only single_context network_groups is supported!. Network group {} has {} contexts.",
- network_group_name, partial_core_op->contexts.size());
- CHECK_AS_EXPECTED(!(Hef::Impl::contains_ddr_layers(*partial_core_op)), HAILO_INVALID_OPERATION,
- "DDR layers are only supported for PCIe device. Network group {} contains DDR layers.",
- network_group_name);
- status = Hef::Impl::validate_core_op_unique_layer_names(*partial_core_op);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- /* Update preliminary_config and dynamic_contexts recepies */
- auto &proto_preliminary_config = partial_core_op->preliminary_config;
- auto net_group_config = Hef::Impl::create_single_context_network_group_config(proto_preliminary_config);
- CHECK_EXPECTED(net_group_config);
-
- auto network_group_metadata = hef.pimpl->get_network_group_metadata(network_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- auto net_flow_ops = hef.pimpl->post_process_ops(network_group_metadata->network_group_name());
-
- auto single_context_app = HcpConfigNetworkGroup(*this, m_active_net_group_holder, net_group_config.release(),
- config_params, network_group_metadata.release(), status, std::move(net_flow_ops));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto net_group_shared_ptr = make_shared_nothrow<HcpConfigNetworkGroup>(std::move(single_context_app));
- CHECK_AS_EXPECTED(nullptr != net_group_shared_ptr, HAILO_OUT_OF_HOST_MEMORY);
- m_network_groups.emplace_back(net_group_shared_ptr);
- added_network_groups.emplace_back(std::static_pointer_cast<ConfiguredNetworkGroup>(net_group_shared_ptr));
-
- // TODO: move this func into HcpConfigNetworkGroup c'tor
- status = net_group_shared_ptr->create_streams_from_config_params(*this);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- // Check that all boundary streams were created
- status = hef.pimpl->validate_boundary_streams_were_created(network_group_name, *net_group_shared_ptr);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
- std::string unmatched_keys = "";
- for (const auto &pair : configure_params_copy) {
- unmatched_keys.append(" ");
- unmatched_keys.append(pair.first);
- }
- CHECK_AS_EXPECTED(unmatched_keys.size() == 0, HAILO_INVALID_ARGUMENT,
- "Some network group names in the configuration are not found in the hef file:{}", unmatched_keys);
-
- return added_network_groups;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file eth_device.hpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#ifndef HAILO_ETH_DEVICE_H_
-#define HAILO_ETH_DEVICE_H_
-
-#include "hailo/expected.hpp"
-#include "hailo/hailort.h"
-#include "device_internal.hpp"
-#include "udp.hpp"
-#include "context_switch/single_context/hcp_config_network_group.hpp"
-
-namespace hailort
-{
-
-class EthernetDevice : public DeviceBase {
-public:
- virtual hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size,
- uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id) override;
- virtual Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id) override;
- virtual hailo_status wait_for_wakeup() override;
- virtual void increment_control_sequence() override;
- virtual hailo_reset_device_mode_t get_default_reset_mode() override;
- virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
-
- virtual bool is_stream_interface_supported(const hailo_stream_interface_t &stream_interface) const override
- {
- switch (stream_interface) {
- case HAILO_STREAM_INTERFACE_PCIE:
- case HAILO_STREAM_INTERFACE_CORE:
- return false;
- case HAILO_STREAM_INTERFACE_ETH:
- case HAILO_STREAM_INTERFACE_MIPI:
- return true;
- default:
- LOGGER__ERROR("Invalid stream interface");
- return false;
- }
- }
-
- static Expected<std::vector<hailo_eth_device_info_t>> scan(const std::string &interface_name,
- std::chrono::milliseconds timeout);
- static Expected<std::vector<hailo_eth_device_info_t>> scan_by_host_address(const std::string &host_address,
- std::chrono::milliseconds timeout);
- static Expected<hailo_eth_device_info_t> parse_eth_device_info(const std::string &ip_addr, bool log_on_failure);
-
- static Expected<std::unique_ptr<EthernetDevice>> create(const hailo_eth_device_info_t &device_info);
- static Expected<std::unique_ptr<EthernetDevice>> create(const std::string &ip_addr);
- virtual Expected<hailo_device_architecture_t> get_architecture() const override;
- hailo_eth_device_info_t get_device_info() const;
- virtual const char* get_dev_id() const override;
-
-protected:
- virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() override;
- virtual hailo_status disable_notifications() override;
- virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) override;
-
-private:
- EthernetDevice(const hailo_eth_device_info_t &device_info, Udp &&control_udp, hailo_status &status);
-
- const hailo_eth_device_info_t m_device_info;
- std::string m_device_id;
- Udp m_control_udp;
- std::vector<std::shared_ptr<HcpConfigNetworkGroup>> m_network_groups;
- ActiveNetGroupHolder m_active_net_group_holder;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_ETH_DEVICE_H_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file eth_stream.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include <new>
-#include <stdlib.h>
-#include <math.h>
-#include <byte_order.h>
-
-#include <hailo/hailort.h>
-#include "common/utils.hpp"
-#include "hailo/stream.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/hailort_common.hpp"
-#include "eth_stream.hpp"
-#include "eth_device.hpp"
-#include "control.hpp"
-#include "token_bucket.hpp"
-#include "common/ethernet_utils.hpp"
-
-namespace hailort
-{
-
-#define SYNC_PACKET_BARKER (0xa143341a)
-
-
-typedef struct hailo_output_sync_packet_t {
- uint32_t barker;
- uint32_t sequence_index;
-} hailo_output_sync_packet_t;
-
-EthernetInputStream::~EthernetInputStream()
-{
- if (m_is_stream_activated) {
- auto status = this->deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
- }
- }
-}
-
-
-Expected<Udp> eth_stream__create_udp(EthernetDevice *eth_device, struct sockaddr_in host_address, uint8_t stream_index,
- port_t device_port, bool is_input)
-{
- if (HAILO_DEFAULT_ETH_DEVICE_PORT == device_port) {
- if (is_input) {
- device_port = (uint16_t)(stream_index + HailoRTCommon::ETH_INPUT_BASE_PORT);
- } else {
- device_port = (uint16_t)(stream_index + HailoRTCommon::ETH_OUTPUT_BASE_PORT);
- }
- }
-
- return Udp::create(eth_device->get_device_info().device_address.sin_addr, device_port, host_address.sin_addr,
- host_address.sin_port);
-}
-
-/** Input stream **/
-hailo_status EthernetInputStream::deactivate_stream()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- ASSERT(m_is_stream_activated);
-
- // TODO: Hold a ref not a pointer
- status = Control::close_stream(m_device, m_dataflow_manager_id, true);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = false;
-
- return HAILO_SUCCESS;
-}
-
-// Note: Ethernet streams don't work with dynamic batch sizes
-hailo_status EthernetInputStream::activate_stream(uint16_t /* dynamic_batch_size */)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- CONTROL_PROTOCOL__config_stream_params_t params = {};
-
- params.nn_stream_config = m_nn_stream_config;
- params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_UDP;
- params.is_input = true;
- params.stream_index = m_stream_info.index;
- params.communication_params.udp_input.listening_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_device_address.sin_port));
- params.skip_nn_stream_config = false;
- // Currently hardcoded assign as there are no power mode optimizations over eth
- params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
-
- if (this->configuration.is_sync_enabled) {
- params.communication_params.udp_input.sync.should_sync = true;
- params.communication_params.udp_input.sync.frames_per_sync = this->configuration.frames_per_sync;
- params.communication_params.udp_input.sync.packets_per_frame = this->configuration.packets_per_frame;
- params.communication_params.udp_input.sync.sync_size = this->configuration.sync_size;
- }
-
- params.communication_params.udp_input.buffers_threshold = this->configuration.buffers_threshold;
- params.communication_params.udp_input.use_rtp = false;
-
- status = Control::config_stream_udp_input(m_device, ¶ms, m_dataflow_manager_id);
- CHECK_SUCCESS(status);
-
- status = Control::open_stream(m_device, m_dataflow_manager_id, true);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = true;
-
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> EthernetInputStream::sync_write_raw_buffer(const MemoryView &buffer)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- status = get_network_group_activated_event()->wait(std::chrono::milliseconds(0));
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED, "Trying to write on stream before its network_group is activated");
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- size_t size = buffer.size();
- status = m_udp.send((uint8_t*)buffer.data(), &size, this->configuration.use_dataflow_padding, this->configuration.max_payload_size);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Udp send was aborted!");
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status, "{} (H2D) failed with status={}", name(), status);
-
- return size;
-}
-
-hailo_status EthernetInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- ASSERT(NULL != buffer);
-
- CHECK(size >= MIN_UDP_PAYLOAD_SIZE, HAILO_INVALID_ARGUMENT, "Input must be larger than {}", MIN_UDP_PAYLOAD_SIZE);
- CHECK(((size % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
- "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, size);
-
- if (this->configuration.is_sync_enabled) {
- status = eth_stream__write_all_with_sync(buffer, offset, size);
- } else {
- status = eth_stream__write_all_no_sync(buffer, offset, size);
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("eth_stream__write_all was aborted!");
- return status;
- }
-
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetInputStream::eth_stream__write_all_no_sync(void *buffer, size_t offset, size_t size) {
- size_t remainder_size = 0;
- size_t packet_size = this->configuration.max_payload_size;
-
- //if we have padding, consider it when calculating the packet sizes
- if (this->configuration.use_dataflow_padding) {
- packet_size -= PADDING_BYTES_SIZE + PADDING_ALIGN_BYTES;
- }
-
- remainder_size = size % packet_size;
-
- if ((0 < remainder_size) && (remainder_size < MIN_UDP_PAYLOAD_SIZE)) {
- remainder_size = MIN_UDP_PAYLOAD_SIZE;
- }
- return eth_stream__write_with_remainder(buffer, offset, size, remainder_size);
-}
-
-hailo_status EthernetInputStream::eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size) {
- size_t transfer_size = 0;
- size_t offset_end_without_remainder = offset + size - remainder_size;
-
- while (offset < offset_end_without_remainder) {
- transfer_size = offset_end_without_remainder - offset;
- auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, transfer_size));
- if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
- LOGGER__INFO("sync_write_raw_buffer was aborted!");
- return expected_bytes_written.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
- offset += expected_bytes_written.release();
- }
- if (0 < remainder_size) {
- auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, remainder_size));
- if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
- LOGGER__INFO("sync_write_raw_buffer was aborted!");
- return expected_bytes_written.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
- assert(expected_bytes_written.value() == remainder_size);
- }
-
- return HAILO_SUCCESS;
-}
-
-EthernetInputStreamRateLimited::EthernetInputStreamRateLimited(Device &device, Udp &&udp,
- EventPtr &&network_group_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status) :
- EthernetInputStream::EthernetInputStream(device, std::move(udp), std::move(network_group_activated_event), layer_info, status),
- rate_bytes_per_sec(rate_bytes_per_sec)
-{}
-
-EthernetInputStreamRateLimited::EthernetInputStreamRateLimited(EthernetInputStreamRateLimited &&other) :
- EthernetInputStream(std::move(other)),
- rate_bytes_per_sec(other.rate_bytes_per_sec)
-{}
-
-TokenBucketEthernetInputStream::TokenBucketEthernetInputStream(Device &device, Udp &&udp,
- EventPtr &&network_group_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status) :
- EthernetInputStreamRateLimited::EthernetInputStreamRateLimited(device, std::move(udp),
- std::move(network_group_activated_event), rate_bytes_per_sec, layer_info, status),
- token_bucket()
-{}
-
-TokenBucketEthernetInputStream::TokenBucketEthernetInputStream(TokenBucketEthernetInputStream &&other) :
- EthernetInputStreamRateLimited(std::move(other)),
- token_bucket(std::move(other.token_bucket))
-{}
-
-hailo_status TokenBucketEthernetInputStream::eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size) {
- size_t transfer_size = 0;
- size_t offset_end_without_remainder = offset + size - remainder_size;
-
- assert(remainder_size <= MAX_CONSUME_SIZE);
- static_assert(MAX_CONSUME_SIZE <= BURST_SIZE, "We are asking to consume more bytes than the size of the token bucket, this will fail");
-
- while (offset < offset_end_without_remainder) {
- (void)token_bucket.consumeWithBorrowAndWait(MAX_CONSUME_SIZE, rate_bytes_per_sec, BURST_SIZE);
-
- transfer_size = offset_end_without_remainder - offset;
- auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, transfer_size));
- if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
- LOGGER__INFO("sync_write_raw_buffer was aborted!");
- return expected_bytes_written.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
- offset += expected_bytes_written.release();
- }
- if (0 < remainder_size) {
- // We don't static_assert that "remainder_size <= BURST_SIZE", so the call could fail in theory.
- // However, since remainder_size is modulo MAX_UDP_PAYLOAD_SIZE and BURST_SIZE == MAX_UDP_PAYLOAD_SIZE, it should be smaller.
- (void)token_bucket.consumeWithBorrowAndWait(static_cast<double>(remainder_size), rate_bytes_per_sec, BURST_SIZE);
-
- auto expected_bytes_written = sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, remainder_size));
- if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_written.status()) {
- LOGGER__INFO("sync_write_raw_buffer was aborted!");
- return expected_bytes_written.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_written);
- assert(expected_bytes_written.value() == remainder_size);
- }
-
- return HAILO_SUCCESS;
-}
-
-#if defined(__GNUC__)
-Expected<std::unique_ptr<TrafficControlEthernetInputStream>> TrafficControlEthernetInputStream::create(
- Device &device, Udp &&udp, EventPtr &&network_group_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info)
-{
- auto board_ip = get_interface_address(&udp.m_device_address.sin_addr);
- CHECK_EXPECTED(board_ip, "get_interface_address failed with status {}", board_ip.status());
-
- const auto board_port = BYTE_ORDER__ntohs(udp.m_device_address.sin_port);
-
- auto tc = TrafficControl::create(board_ip.value(), board_port, rate_bytes_per_sec);
- CHECK_EXPECTED(tc, "Creating traffic control at rate {} failed with error {}", rate_bytes_per_sec, tc.status());
-
- auto status = HAILO_UNINITIALIZED;
- // Note: we don't use make_unique because TrafficControlEthernetInputStream's ctor is private
- auto tc_ptr = std::unique_ptr<TrafficControlEthernetInputStream>(new (std::nothrow)
- TrafficControlEthernetInputStream(device, std::move(udp), std::move(network_group_activated_event), rate_bytes_per_sec,
- tc.release(), layer_info, status));
- CHECK_AS_EXPECTED(nullptr != tc_ptr, HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status);
- return tc_ptr;
-}
-
-Expected<std::string> TrafficControlEthernetInputStream::get_interface_address(const struct in_addr *addr)
-{
- auto ip = Buffer::create(IPV4_STRING_MAX_LENGTH, 0);
- CHECK_EXPECTED(ip);
-
- const auto result = Socket::ntop(AF_INET, addr, ip->as_pointer<char>(), EthernetUtils::MAX_INTERFACE_SIZE);
- CHECK_SUCCESS_AS_EXPECTED(result, "Failed parsing IP to string with status {}", result);
-
- return ip->to_string();
-}
-
-TrafficControlEthernetInputStream::TrafficControlEthernetInputStream(Device &device, Udp &&udp,
- EventPtr &&network_group_activated_event, uint32_t rate_bytes_per_sec, TrafficControl &&tc, const LayerInfo &layer_info, hailo_status &status) :
- EthernetInputStreamRateLimited(device, std::move(udp), std::move(network_group_activated_event), rate_bytes_per_sec, layer_info, status),
- m_tc(std::move(tc))
-{}
-#endif
-
-hailo_status EthernetInputStream::eth_stream__write_all_with_sync(void *buffer, size_t offset, size_t size) {
- hailo_status status = HAILO_UNINITIALIZED;
- size_t number_of_frames = 0;
- size_t frame_size = m_stream_info.hw_frame_size;
-
- if (0 != (size % frame_size)) {
- LOGGER__ERROR("Read size is not a multiple of frame size."
- "This operation is not possible with the sync packet mode."
- "Tried to read {} bytes and frame size is {}", size, m_stream_info.hw_frame_size);
- return HAILO_INVALID_ARGUMENT;
- }
-
- number_of_frames = size / frame_size;
- for (size_t i = 0; i < number_of_frames; i++) {
- // Write frame by frame, whereas the remainder packet is the sync packet
- status = eth_stream__write_with_remainder(buffer, offset, frame_size, this->configuration.sync_size);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("eth_stream__write_with_remainder was aborted!");
- return status;
- }
- CHECK_SUCCESS(status);
- offset += frame_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetInputStream::eth_stream__config_input_sync_params(uint32_t frames_per_sync)
-{
- size_t packet_size = MAX_UDP_PAYLOAD_SIZE;
-
- if (MAX_UDP_PAYLOAD_SIZE >= m_stream_info.hw_frame_size) {
- LOGGER__WARNING("Input size that isn't larger than {} doesn't benefit from sync, disabling..", MAX_UDP_PAYLOAD_SIZE);
- this->configuration.is_sync_enabled = false;
- return HAILO_SUCCESS;
- }
- this->configuration.is_sync_enabled = true;
- CHECK(1 == frames_per_sync, HAILO_NOT_IMPLEMENTED,
- "Currently not supported frames_per_sync != 1");
- this->configuration.frames_per_sync = frames_per_sync;
- //if we have padding, consider it when determining the number of packets
- if (this->configuration.use_dataflow_padding) {
- packet_size = MAX_UDP_PADDED_PAYLOAD_SIZE;
- }
- // Data packets per frame are all of the packets except the sync
- this->configuration.packets_per_frame = (uint32_t) ceil((double) m_stream_info.hw_frame_size / (double) packet_size) - 1;
- if (0 == (m_stream_info.hw_frame_size % packet_size)) {
- // If there is no remainder to make the sync packet, we will "cut" it from the last data packet, thus increasing the number of packets.
- this->configuration.packets_per_frame++;
- }
- // Make the remainder packet the sync packet
- this->configuration.sync_size = (uint16_t)(m_stream_info.hw_frame_size % packet_size);
-
- if (MIN_UDP_PAYLOAD_SIZE > this->configuration.sync_size) {
- // If the remainder isn't big enough, we'll "cut" from the last data packet enough to fill the minimum size.
- this->configuration.sync_size = MIN_UDP_PAYLOAD_SIZE;
- }
- LOGGER__DEBUG("Configured sync size {}, packets per frame {}", this->configuration.sync_size, this->configuration.packets_per_frame);
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<EthernetInputStream>> EthernetInputStream::create(Device &device,
- const LayerInfo &edge_layer, const hailo_eth_input_stream_params_t ¶ms, EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- // TODO: try to avoid cast
- auto eth_device = reinterpret_cast<EthernetDevice*>(&device);
- std::unique_ptr<EthernetInputStream> local_stream;
-
- auto stream_index = edge_layer.stream_index;
- auto udp = eth_stream__create_udp(eth_device, params.host_address, stream_index, params.device_port, true);
- CHECK_EXPECTED(udp);
-
- if (params.rate_limit_bytes_per_sec == 0) {
- local_stream = std::unique_ptr<EthernetInputStream>(
- new (std::nothrow) EthernetInputStream(device, udp.release(), std::move(network_group_activated_event), edge_layer, status));
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else {
-#ifdef _MSC_VER
- // TODO: Add factory class
- local_stream = std::unique_ptr<EthernetInputStream>(
- new (std::nothrow) TokenBucketEthernetInputStream(device, udp.release(),
- std::move(network_group_activated_event), params.rate_limit_bytes_per_sec, edge_layer, status));
- CHECK_SUCCESS_AS_EXPECTED(status);
-#else
- auto stream_expected = TrafficControlEthernetInputStream::create(device, udp.release(),
- std::move(network_group_activated_event), params.rate_limit_bytes_per_sec, edge_layer);
- CHECK_EXPECTED(stream_expected);
- local_stream = stream_expected.release();
-#endif
- }
-
- CHECK_AS_EXPECTED((nullptr != local_stream), HAILO_OUT_OF_HOST_MEMORY);
- local_stream->m_is_stream_activated = false;
-
- auto device_architecture = eth_device->get_architecture();
- CHECK_EXPECTED(device_architecture);
- if ((HAILO_ARCH_HAILO8 == device_architecture.value()) || (HAILO_ARCH_HAILO8L == device_architecture.value())) {
- local_stream->configuration.use_dataflow_padding = true;
- }
- else {
- local_stream->configuration.use_dataflow_padding = false;
- }
-
- local_stream->set_max_payload_size(params.max_payload_size);
-
- local_stream->configuration.is_sync_enabled = params.is_sync_enabled;
- if (local_stream->configuration.is_sync_enabled) {
- status = local_stream->eth_stream__config_input_sync_params(params.frames_per_sync);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- local_stream->configuration.buffers_threshold = params.buffers_threshold;
-
- return local_stream;
-}
-
-void EthernetInputStream::set_max_payload_size(uint16_t size)
-{
- if (size > MAX_UDP_PAYLOAD_SIZE) {
- size = MAX_UDP_PAYLOAD_SIZE;
- }
- this->configuration.max_payload_size = size;
-}
-
-hailo_status EthernetInputStream::set_timeout(std::chrono::milliseconds timeout)
-{
- return m_udp.set_timeout(timeout);
-}
-
-std::chrono::milliseconds EthernetInputStream::get_timeout() const
-{
- return std::chrono::milliseconds((MILLISECONDS_IN_SECOND * m_udp.m_timeout.tv_sec) + (m_udp.m_timeout.tv_usec / MICROSECONDS_IN_MILLISECOND));
-}
-
-uint16_t EthernetInputStream::get_remote_port()
-{
- return ntohs(m_udp.m_device_address.sin_port);
-}
-
-/** Output stream **/
-EthernetOutputStream::~EthernetOutputStream()
-{
- if (m_is_stream_activated) {
- auto status = this->deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
- }
- }
-}
-
-hailo_status EthernetOutputStream::deactivate_stream()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- ASSERT(m_is_stream_activated);
-
- status = Control::close_stream(m_device, m_dataflow_manager_id, false);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = false;
-
- return HAILO_SUCCESS;
-}
-
-// Note: Ethernet streams don't work with dynamic batch sizes
-hailo_status EthernetOutputStream::activate_stream(uint16_t /* dynamic_batch_size */)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- CONTROL_PROTOCOL__config_stream_params_t params = {};
-
- params.nn_stream_config = m_nn_stream_config;
- params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_UDP;
- params.is_input = false;
- params.stream_index = m_stream_info.index;
- params.skip_nn_stream_config = false;
- // Currently hardcoded assign as there are no power mode optimizations over eth
- params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
-
- params.communication_params.udp_output.chip_udp_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_device_address.sin_port));
- params.communication_params.udp_output.host_udp_port = (uint16_t)(BYTE_ORDER__htons(m_udp.m_host_address.sin_port));
- params.communication_params.udp_output.max_udp_payload_size = this->configuration.max_payload_size;
- params.communication_params.udp_output.buffers_threshold = this->configuration.buffers_threshold;
- params.communication_params.udp_output.use_rtp = false;
-
- if (this->configuration.is_sync_enabled) {
- params.communication_params.udp_output.should_send_sync_packets = true;
- }
-
- status = Control::config_stream_udp_output(m_device, ¶ms, m_dataflow_manager_id);
- CHECK_SUCCESS(status);
-
- status = Control::open_stream(m_device, m_dataflow_manager_id, false);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = true;
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetOutputStream::read_all_no_sync(void *buffer, size_t offset, size_t size) {
- size_t offset_end = 0;
- size_t transfer_size = 0;
-
- offset_end = offset + size;
- while (offset < offset_end) {
- transfer_size = offset_end - offset;
- MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
- auto expected_bytes_read = this->sync_read_raw_buffer(buffer_view);
- if (HAILO_STREAM_ABORTED_BY_USER == expected_bytes_read.status()) {
- LOGGER__INFO("sync_read_raw_buffer was aborted!");
- return expected_bytes_read.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_read);
- offset += expected_bytes_read.release();
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetOutputStream::read_all_with_sync(void *buffer, size_t offset, size_t size) {
- hailo_status status = HAILO_UNINITIALIZED;
- size_t initial_offset = offset;
- size_t offset_end = offset + size;
- bool got_last_sync_early = false;
- const size_t frame_size = m_stream_info.hw_frame_size;
- bool is_batch_invalid = false;
-
- if ((size % frame_size) != 0) {
- LOGGER__ERROR("Read size is not a multiple of frame size."
- "This operation is not possible with the sync packet mode."
- "Tried to read {} bytes and frame size is {}", size, frame_size);
- return HAILO_INVALID_ARGUMENT;
- }
-
- if (this->leftover_size > 0) {
- memcpy((uint8_t*)buffer + offset, this->leftover_buffer, this->leftover_size);
- offset += this->leftover_size;
- // leftover size will be reassigned in the end, but in case the function ends prematurely we will zero it for safety.
- this->leftover_size = 0;
- }
-
- while (offset < offset_end) {
- size_t transfer_size = offset_end - offset;
- MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
- auto expected_bytes_read = this->sync_read_raw_buffer(buffer_view);
- status = expected_bytes_read.status();
- if (HAILO_TIMEOUT == status) {
- return handle_timeout(buffer, offset, initial_offset, frame_size);
- } else if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("sync_read_raw_buffer was aborted");
- return status;
- } else if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("read failed");
- return status;
- }
- transfer_size = expected_bytes_read.release();
- if (is_sync_packet(buffer, offset, transfer_size)) {
- uint32_t sequence_index = BYTE_ORDER__ntohl(((hailo_output_sync_packet_t*)((uint8_t*)buffer + offset))->sequence_index);
- if (is_sync_expected(offset, initial_offset, frame_size)) {
- if (sequence_index != (this->last_seen_sync_index + 1)) {
- // Batch is invalid if a frame was skipped
- is_batch_invalid = true;
- LOGGER__WARNING("Received {} frames. Missed sync packets between them, treating the batch as invalid data", sequence_index - this->last_seen_sync_index);
- }
- if (sequence_index == this->last_seen_sync_index) {
- LOGGER__ERROR("Got duplicate sync!");
- return HAILO_INTERNAL_FAILURE;
- }
- } else {
- size_t number_of_missing_bytes = (frame_size - ((offset - initial_offset) % frame_size));
- LOGGER__WARNING("Some bytes are missing at frame, padding {} bytes with zeros", number_of_missing_bytes);
- memset((uint8_t*)buffer + offset, 0, number_of_missing_bytes);
- offset += number_of_missing_bytes;
- if (offset == offset_end) {
- got_last_sync_early = true;
- }
- is_batch_invalid = true;
- }
- this->last_seen_sync_index = sequence_index;
- } else {
- offset += transfer_size;
- }
- }
-
- status = HAILO_SUCCESS;
-
- if (!got_last_sync_early) {
- status = get_last_sync();
- }
- if (HAILO_SUCCESS == status && is_batch_invalid) {
- return HAILO_INVALID_FRAME;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetOutputStream::get_last_sync() {
- size_t last_packet_size = sizeof(this->leftover_buffer);
- MemoryView leftover_buffer_view(this->leftover_buffer, last_packet_size);
- auto expected_bytes_read = sync_read_raw_buffer(leftover_buffer_view);
- CHECK(HAILO_TIMEOUT != expected_bytes_read.status(), HAILO_INVALID_FRAME, "Got timeout on last sync, marking last frame as invalid");
- CHECK_EXPECTED_AS_STATUS(expected_bytes_read, "Recv error");
- last_packet_size = expected_bytes_read.release();
-
- if (is_sync_packet(this->leftover_buffer, 0, last_packet_size)) {
- this->leftover_size = 0;
- } else {
- LOGGER__WARNING("Received a data packet instead of sync, saving leftover for later frame");
- this->leftover_size = last_packet_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status EthernetOutputStream::handle_timeout(const void* buffer, size_t offset,
- size_t initial_offset, const size_t frame_size) {
- // In case data a timeout has occurred, and data was received, try filling missing in frame
- if (this->encountered_timeout || (offset == initial_offset)) {
- LOGGER__ERROR("{} (D2H) got timeout (timeout={}ms), unable to complete the frame", name(), get_timeout().count());
- return HAILO_TIMEOUT;
- }
- LOGGER__ERROR("Received timeout. Continuing logic as if a sync packet was received");
- size_t number_of_missing_bytes = (frame_size - ((offset - initial_offset) % frame_size));
- LOGGER__ERROR("padding {} bytes with zeros because of timeout", number_of_missing_bytes);
- memset((uint8_t*)buffer + offset, 0, number_of_missing_bytes);
- this->encountered_timeout = true;
- return HAILO_INVALID_FRAME;
-}
-
-bool EthernetOutputStream::is_sync_expected(size_t offset, size_t initial_offset, const size_t frame_size) {
- return (((offset - initial_offset) % frame_size) == 0) && (offset > initial_offset);
-}
-
-bool EthernetOutputStream::is_sync_packet(const void* buffer, size_t offset, size_t transfer_size) {
- return (transfer_size == sizeof(hailo_output_sync_packet_t) &&
- ((hailo_output_sync_packet_t*)((uint8_t*)buffer + offset))->barker == BYTE_ORDER__ntohl(SYNC_PACKET_BARKER));
-}
-
-hailo_status EthernetOutputStream::read_all(MemoryView &buffer)
-{
- if ((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) != 0) {
- LOGGER__ERROR("Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
- return HAILO_INVALID_ARGUMENT;
- }
-
- hailo_status status = HAILO_UNINITIALIZED;
- if (this->configuration.is_sync_enabled) {
- status = this->read_all_with_sync(buffer.data(), 0, buffer.size());
- } else {
- status = this->read_all_no_sync(buffer.data(), 0, buffer.size());
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("read_all was aborted!");
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> EthernetOutputStream::sync_read_raw_buffer(MemoryView &buffer)
-{
- auto status = get_network_group_activated_event()->wait(std::chrono::milliseconds(0));
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED,
- "Trying to read on stream before its network_group is activated");
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto buffer_size = buffer.size();
- status = m_udp.recv((uint8_t*)buffer.data(),&buffer_size);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Udp recv was aborted!");
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status, "{} (D2H) failed with status={}", name(), status);
-
- return buffer_size;
-}
-
-hailo_status EthernetOutputStream::fill_output_stream_ptr_with_info(const hailo_eth_output_stream_params_t ¶ms, EthernetOutputStream *stream)
-{
- if ((HAILO_FORMAT_ORDER_HAILO_NMS == stream->m_stream_info.format.order)
- && (params.is_sync_enabled)) {
- LOGGER__WARNING("NMS is not supported with sync enabled. Setting sync flag to false");
- stream->configuration.is_sync_enabled = false;
- } else {
- stream->configuration.is_sync_enabled = params.is_sync_enabled;
- }
-
- stream->configuration.max_payload_size = params.max_payload_size;
- stream->configuration.buffers_threshold = params.buffers_threshold;
-
- stream->m_is_stream_activated = false;
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<EthernetOutputStream>> EthernetOutputStream::create(Device &device,
- const LayerInfo &edge_layer, const hailo_eth_output_stream_params_t ¶ms, EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- std::unique_ptr<EthernetOutputStream> local_stream = nullptr;
- // TODO: try to avoid cast
- auto eth_device = reinterpret_cast<EthernetDevice*>(&device);
-
- const auto stream_index = edge_layer.stream_index;
- auto udp = eth_stream__create_udp(eth_device, params.host_address, stream_index, params.device_port, false);
- CHECK_EXPECTED(udp);
- local_stream = std::unique_ptr<EthernetOutputStream>(new (std::nothrow) EthernetOutputStream(device,
- edge_layer,
- udp.release(), std::move(network_group_activated_event), status));
- CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = fill_output_stream_ptr_with_info(params, local_stream.get());
- CHECK_SUCCESS_AS_EXPECTED(status);
- return local_stream;
-}
-
-hailo_status EthernetOutputStream::set_timeout(std::chrono::milliseconds timeout)
-{
- return m_udp.set_timeout(timeout);
-}
-
-std::chrono::milliseconds EthernetOutputStream::get_timeout() const
-{
- return std::chrono::milliseconds((MILLISECONDS_IN_SECOND * m_udp.m_timeout.tv_sec) + (m_udp.m_timeout.tv_usec / MICROSECONDS_IN_MILLISECOND));
-}
-
-hailo_status EthernetOutputStream::abort()
-{
- return m_udp.abort();
-}
-
-hailo_status EthernetInputStream::abort()
-{
- return m_udp.abort();
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file eth_stream.hpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#ifndef HAILO_ETH_STREAM_H_
-#define HAILO_ETH_STREAM_H_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "token_bucket.hpp"
-#include "udp.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/device.hpp"
-#include "hailo/event.hpp"
-
-#if defined(__GNUC__)
-#include "common/os/posix/traffic_control.hpp"
-#endif
-
-namespace hailort
-{
-
-// TODO: move those structs to hailort.h when implemented
-typedef struct {
- uint16_t max_payload_size;
- bool use_dataflow_padding;
- bool is_sync_enabled;
- uint32_t frames_per_sync;
- uint32_t packets_per_frame;
- uint16_t sync_size;
- uint32_t buffers_threshold;
-} hailo_stream_eth_input_configuration_t;
-
-typedef struct {
- uint16_t max_payload_size;
- bool is_sync_enabled;
- uint32_t buffers_threshold;
-} hailo_stream_eth_output_configuration_t;
-
-class EthernetInputStream : public InputStreamBase {
-private:
- hailo_stream_eth_input_configuration_t configuration;
- Udp m_udp;
- bool m_is_stream_activated;
- Device &m_device;
-
- hailo_status eth_stream__config_input_sync_params(uint32_t frames_per_sync);
- hailo_status eth_stream__write_all_no_sync(void *buffer, size_t offset, size_t size);
- hailo_status eth_stream__write_all_with_sync(void *buffer, size_t offset, size_t size);
- hailo_status set_timeout(std::chrono::milliseconds timeout);
- void set_max_payload_size(uint16_t size);
-
-protected:
- virtual hailo_status eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size);
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
- virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
-
-public:
- EthernetInputStream(Device &device, Udp &&udp, EventPtr &&network_group_activated_event, const LayerInfo &layer_info, hailo_status &status) :
- InputStreamBase(layer_info, HAILO_STREAM_INTERFACE_ETH, std::move(network_group_activated_event), status), m_udp(std::move(udp)), m_device(device) {}
- EthernetInputStream(EthernetInputStream&& other) :
- InputStreamBase(std::move(other)),
- configuration(std::move(other.configuration)),
- m_udp(std::move(other.m_udp)),
- m_is_stream_activated(std::exchange(other.m_is_stream_activated, false)),
- m_device(other.m_device)
- {}
-
- virtual ~EthernetInputStream();
-
- static Expected<std::unique_ptr<EthernetInputStream>> create(Device &device,
- const LayerInfo &edge_layer, const hailo_eth_input_stream_params_t ¶ms, EventPtr network_group_activated_event);
-
- uint16_t get_remote_port();
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override {return HAILO_SUCCESS;}; // TODO (HRT-3799): clear abort state in the eth stream
-};
-
-class EthernetInputStreamRateLimited : public EthernetInputStream {
-protected:
- const uint32_t rate_bytes_per_sec;
-
-public:
- EthernetInputStreamRateLimited(Device &device, Udp &&udp, EventPtr &&network_group_activated_event,
- uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status);
- EthernetInputStreamRateLimited(EthernetInputStreamRateLimited &&other);
- virtual ~EthernetInputStreamRateLimited() = default;
-};
-
-class TokenBucketEthernetInputStream : public EthernetInputStreamRateLimited {
-private:
- DynamicTokenBucket token_bucket;
- // Note:
- // * We set the token bucket's burst size to be our MTU. If we'd use larger burst sizes
- // we could send packets faster than the desired rate.
- // * We send packets with at most MAX_UDP_PAYLOAD_SIZE bytes of data. Hence we won't
- // consume more than MAX_UDP_PAYLOAD_SIZE tokens from the token bucket.
- static const uint32_t BURST_SIZE = MAX_UDP_PAYLOAD_SIZE;
- static const uint32_t MAX_CONSUME_SIZE = MAX_UDP_PAYLOAD_SIZE;
-
-protected:
- virtual hailo_status eth_stream__write_with_remainder(void *buffer, size_t offset, size_t size, size_t remainder_size);
-
-public:
- TokenBucketEthernetInputStream(Device &device, Udp &&udp, EventPtr &&network_group_activated_event,
- uint32_t rate_bytes_per_sec, const LayerInfo &layer_info, hailo_status &status);
- TokenBucketEthernetInputStream(TokenBucketEthernetInputStream &&other);
- virtual ~TokenBucketEthernetInputStream() = default;
-};
-
-
-#if defined(__GNUC__)
-class TrafficControlEthernetInputStream : public EthernetInputStreamRateLimited {
-public:
- static Expected<std::unique_ptr<TrafficControlEthernetInputStream>> create(Device &device, Udp &&udp,
- EventPtr &&network_group_activated_event, uint32_t rate_bytes_per_sec, const LayerInfo &layer_info);
- TrafficControlEthernetInputStream(TrafficControlEthernetInputStream&& other) = default;
- virtual ~TrafficControlEthernetInputStream() = default;
-
-private:
- TrafficControlEthernetInputStream(Device &device, Udp &&udp, EventPtr &&network_group_activated_event,
- uint32_t rate_bytes_per_sec, TrafficControl &&tc, const LayerInfo &layer_info, hailo_status &status);
- static Expected<std::string> get_interface_address(const struct in_addr *addr);
-
- TrafficControl m_tc;
-};
-#endif
-
-class EthernetOutputStream : public OutputStreamBase {
-private:
- uint8_t leftover_buffer[MAX_UDP_PAYLOAD_SIZE];
- size_t leftover_size = 0;
- uint32_t last_seen_sync_index;
- bool encountered_timeout;
- hailo_stream_eth_output_configuration_t configuration;
- Udp m_udp;
- bool m_is_stream_activated;
- Device &m_device;
-
- EthernetOutputStream(Device &device, const LayerInfo &edge_layer, Udp &&udp, EventPtr &&network_group_activated_event, hailo_status &status) :
- OutputStreamBase(edge_layer, std::move(network_group_activated_event), status),
- leftover_buffer(),
- leftover_size(0),
- // Firmware starts sending sync sequence from 0, so treating the first previous as max value (that will be overflowed to 0)
- last_seen_sync_index(std::numeric_limits<uint32_t>::max()),
- encountered_timeout(false),
- configuration(),
- m_udp(std::move(udp)),
- m_device(device)
- {}
-
- hailo_status read_all(MemoryView &buffer) override;
- hailo_status read_all_with_sync(void *buffer, size_t offset, size_t size);
- hailo_status read_all_no_sync(void *buffer, size_t offset, size_t size);
-
- static bool is_sync_packet(const void* buffer, size_t offset, size_t transfer_size);
- static bool is_sync_expected(size_t offset, size_t initial_offset, size_t frame_size);
- hailo_status handle_timeout(const void* buffer, size_t offset, size_t initial_offset, size_t frame_size);
- hailo_status set_timeout(std::chrono::milliseconds timeout);
- hailo_status get_last_sync();
-
- static hailo_status fill_output_stream_ptr_with_info(const hailo_eth_output_stream_params_t ¶ms, EthernetOutputStream *stream);
-
-public:
- EthernetOutputStream(EthernetOutputStream&& other) :
- OutputStreamBase(std::move(other)),
- leftover_buffer(),
- leftover_size(std::move(other.leftover_size)),
- last_seen_sync_index(std::move(other.last_seen_sync_index)),
- encountered_timeout(std::move(other.encountered_timeout)),
- configuration(std::move(other.configuration)),
- m_udp(std::move(other.m_udp)),
- m_is_stream_activated(std::exchange(other.m_is_stream_activated, false)),
- m_device(other.m_device)
- {
- memcpy(leftover_buffer, other.leftover_buffer, sizeof(leftover_buffer));
- }
-
- virtual ~EthernetOutputStream();
-
- virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer);
-
- static Expected<std::unique_ptr<EthernetOutputStream>> create(Device &device, const LayerInfo &edge_layer,
- const hailo_eth_output_stream_params_t ¶ms, EventPtr network_group_activated_event);
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override {return HAILO_SUCCESS;}; // TODO (HRT-3799): clear abort state in the eth stream
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_ETH_STREAM_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file event.hpp
- * @brief Event and Semaphore wrapper objects used for multithreading
- **/
-
-#ifndef _EVENT_INTERNAL_HPP_
-#define _EVENT_INTERNAL_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-#include <memory>
-#include <vector>
-#include <array>
-#include <chrono>
-#if defined(__GNUC__)
-#include <poll.h>
-#endif
-
-namespace hailort
-{
-
-// TODO: Replace with a static wait_multiple func belonging to Waitable (SDK-16567).
-// Will get a vector of pointers as an argument. Can also use variadic
-// template args for cases with fixed number Waitables
-class WaitOrShutdown final
-{
-public:
- WaitOrShutdown(WaitablePtr waitable, EventPtr shutdown_event);
- ~WaitOrShutdown() = default;
-
- WaitOrShutdown(const WaitOrShutdown &other) = delete;
- WaitOrShutdown &operator=(const WaitOrShutdown &other) = delete;
- WaitOrShutdown(WaitOrShutdown &&other) noexcept = default;
- WaitOrShutdown &operator=(WaitOrShutdown &&other) = delete;
-
- // Waits on waitable or shutdown_event to be signaled:
- // * If shutdown_event is signaled:
- // - shutdown_event is not reset
- // - HAILO_SHUTDOWN_EVENT_SIGNALED is returned
- // * If waitable is signaled:
- // - waitable is reset if waitable->is_auto_reset()
- // - HAILO_SUCCESS is returned
- // * If both waitable and shutdown_event are signaled:
- // - shutdown_event is not reset
- // - waitable is not reset
- // - HAILO_SHUTDOWN_EVENT_SIGNALED is returned
- // * If neither are signaled, then HAILO_TIMEOUT is returned
- // * On any failure an appropriate status shall be returned
- hailo_status wait(std::chrono::milliseconds timeout);
- hailo_status signal();
-
-private:
- // Note: We want to guarantee that if the shutdown event is signaled, HAILO_SHUTDOWN_EVENT_SIGNALED will be
- // returned.
- // * In Unix, using poll this isn't a problem since we'll get all the readable fds in a single call.
- // * In Windows, using WaitForMultipleObjects, this works differently (from msdn):
- // If bWaitAll is FALSE, the return value minus WAIT_OBJECT_0 indicates the lpHandles array index
- // of the object that satisfied the wait. If more than one object became signaled during the call,
- // this is the array index of the signaled object with the smallest index value of all the signaled
- // objects.
- // (https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitformultipleobjects)
- // * Hence, SHUTDOWN_INDEX must come before WAITABLE_INDEX!
- static const size_t SHUTDOWN_INDEX = 0;
- static const size_t WAITABLE_INDEX = 1;
- #if defined(_MSC_VER) || defined(__QNX__)
- using WaitHandleArray = std::array<underlying_waitable_handle_t, 2>;
- #else
- using WaitHandleArray = std::array<struct pollfd, 2>;
- #endif
-
- const WaitablePtr m_waitable;
- const EventPtr m_shutdown_event;
- WaitHandleArray m_wait_handle_array;
-
- static WaitHandleArray create_wait_handle_array(WaitablePtr waitable, EventPtr shutdown_event);
-};
-
-} /* namespace hailort */
-
-#endif /* _EVENT_INTERNAL_HPP_ */
#include "hailo/event.hpp"
#include "hailo/network_rate_calculator.hpp"
#include "hailo/inference_pipeline.hpp"
-#include "eth_device.hpp"
-#include "pcie_device.hpp"
-#include "pcie_stream.hpp"
-#include "eth_stream.hpp"
-#include "control.hpp"
-#include "sensor_config_utils.hpp"
+
#include "common/compiler_extensions_compat.hpp"
-#include "hailort_logger.hpp"
-#include "shared_resource_manager.hpp"
-#include "vdevice_internal.hpp"
-#include "tracer_macros.hpp"
+#include "common/os_utils.hpp"
+
+#include "device_common/control.hpp"
+#include "eth/eth_device.hpp"
+#include "eth/eth_stream.hpp"
+#include "vdma/pcie/pcie_device.hpp"
+#include "utils/sensor_config_utils.hpp"
+#include "utils/hailort_logger.hpp"
+#include "utils/shared_resource_manager.hpp"
+#include "vdevice/vdevice_internal.hpp"
+#include "utils/profiler/tracer_macros.hpp"
#include <chrono>
+
using namespace hailort;
COMPAT__INITIALIZER(hailort__initialize_logger)
{
// Init logger singleton if compiling only HailoRT
(void) HailoRTLogger::get_instance();
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ (void) HailoRTOSLogger::get_instance();
+#endif
(void) SharedResourceManager<std::string, VDeviceBase>::get_instance();
TRACE(InitTrace);
}
case Device::Type::ETH:
*device_type = HAILO_DEVICE_TYPE_ETH;
break;
- case Device::Type::CORE:
- *device_type = HAILO_DEVICE_TYPE_CORE;
+ case Device::Type::INTEGRATED:
+ *device_type = HAILO_DEVICE_TYPE_INTEGRATED;
break;
default:
LOGGER__ERROR("Internal failure, invalid device type returned");
hailo_activate_network_group_params_t actual_activation_params = (activation_params != nullptr) ?
*activation_params :
- HailoRTDefaults::get_network_group_params();
+ HailoRTDefaults::get_active_network_group_params();
auto net_group_ptr = reinterpret_cast<ConfiguredNetworkGroup*>(network_group);
auto activated_net_group = net_group_ptr->activate(actual_activation_params);
CHECK_ARG_NOT_NULL(configured_network_group);
std::string network_name_str = (nullptr == network_name) ? "" : network_name;
- return ((ConfiguredNetworkGroup*)configured_network_group)->set_scheduler_timeout(std::chrono::milliseconds(timeout_ms), network_name_str);
+ return (reinterpret_cast<ConfiguredNetworkGroup*>(configured_network_group))->set_scheduler_timeout(std::chrono::milliseconds(timeout_ms), network_name_str);
}
hailo_status hailo_set_scheduler_threshold(hailo_configured_network_group configured_network_group,
CHECK_ARG_NOT_NULL(configured_network_group);
std::string network_name_str = (nullptr == network_name) ? "" : network_name;
- return ((ConfiguredNetworkGroup*)configured_network_group)->set_scheduler_threshold(threshold, network_name_str);
+ return (reinterpret_cast<ConfiguredNetworkGroup*>(configured_network_group))->set_scheduler_threshold(threshold, network_name_str);
+}
+
+hailo_status hailo_set_scheduler_priority(hailo_configured_network_group configured_network_group, uint8_t priority, const char *network_name)
+{
+ CHECK_ARG_NOT_NULL(configured_network_group);
+
+ std::string network_name_str = (nullptr == network_name) ? "" : network_name;
+ return (reinterpret_cast<ConfiguredNetworkGroup*>(configured_network_group))->set_scheduler_priority(priority, network_name_str);
}
hailo_status hailo_calculate_eth_input_rate_limits(hailo_hef hef, const char *network_group_name, uint32_t fps,
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_common.cpp
- * @brief Implementation of common hailort utilities
- **/
-
-#include "hailo/hailort_common.hpp"
-#include "common/utils.hpp"
-
-namespace hailort
-{
-
-// Needed for the linker
-const uint32_t HailoRTCommon::BBOX_PARAMS;
-const uint32_t HailoRTCommon::MAX_DEFUSED_LAYER_COUNT;
-const size_t HailoRTCommon::HW_DATA_ALIGNMENT;
-const uint64_t HailoRTCommon::NMS_DELIMITER;
-const uint64_t HailoRTCommon::NMS_DUMMY_DELIMITER;
-
-Expected<hailo_device_id_t> HailoRTCommon::to_device_id(const std::string &device_id)
-{
- hailo_device_id_t id = {};
- static constexpr size_t id_size = ARRAY_ENTRIES(id.id);
-
- CHECK_AS_EXPECTED(device_id.size() < id_size, HAILO_INTERNAL_FAILURE,
- "Device '{}' has a too long id (max is {})", device_id, id_size);
-
- strncpy(id.id, device_id.c_str(), id_size - 1);
- id.id[id_size - 1] = 0;
- return id;
-}
-
-Expected<std::vector<hailo_device_id_t>> HailoRTCommon::to_device_ids_vector(const std::vector<std::string> &device_ids_str)
-{
- std::vector<hailo_device_id_t> device_ids_vector;
- device_ids_vector.reserve(device_ids_str.size());
- for (const auto &device_id_str : device_ids_str) {
- auto device_id_struct = to_device_id(device_id_str);
- CHECK_EXPECTED(device_id_struct);
- device_ids_vector.push_back(device_id_struct.release());
- }
- return device_ids_vector;
-}
-
-} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_defaults.cpp
+ * @brief Implmentation of hailort_defaults
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/logger_macros.hpp"
+#include "common/utils.hpp"
+
+
+namespace hailort
+{
+
+static const hailo_format_order_t DEFAULT_FORMAT_ORDER_MAP[] = {
+ // Key is device_format_order, value is default user_format_order
+ HAILO_FORMAT_ORDER_AUTO, // HAILO_FORMAT_ORDER_AUTO, - Should not be used!
+ HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_NHWC,
+ HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_NHCW,
+ HAILO_FORMAT_ORDER_FCR, // HAILO_FORMAT_ORDER_FCR,
+ HAILO_FORMAT_ORDER_F8CR, // HAILO_FORMAT_ORDER_F8CR,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHW,
+ HAILO_FORMAT_ORDER_NC, // HAILO_FORMAT_ORDER_NC,
+ HAILO_FORMAT_ORDER_BAYER_RGB, // HAILO_FORMAT_ORDER_BAYER_RGB,
+ HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB, // HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB
+ HAILO_FORMAT_ORDER_HAILO_NMS, // HAILO_FORMAT_ORDER_HAILO_NMS,
+ HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_RGB888,
+ HAILO_FORMAT_ORDER_NCHW, // HAILO_FORMAT_ORDER_NCHW,
+ HAILO_FORMAT_ORDER_YUY2, // HAILO_FORMAT_ORDER_YUY2,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV12,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV21,
+ HAILO_FORMAT_ORDER_NV12, // HAILO_FORMAT_ORDER_HAILO_YYUV,
+ HAILO_FORMAT_ORDER_NV21, // HAILO_FORMAT_ORDER_HAILO_YYVU,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_RGB4,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_I420,
+ HAILO_FORMAT_ORDER_I420 // HAILO_FORMAT_ORDER_HAILO_YYYYUV,
+};
+
+static const hailo_format_order_t DEFAULT_FORMAT_ARGMAX_ORDER_MAP[] = {
+ // Key is device_format_order, value is default user_format_order
+ HAILO_FORMAT_ORDER_AUTO, // HAILO_FORMAT_ORDER_AUTO, - Should not be used!
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHWC,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHCW,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_FCR,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_F8CR,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHW,
+ HAILO_FORMAT_ORDER_NC, // HAILO_FORMAT_ORDER_NC,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_BAYER_RGB,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB,
+ HAILO_FORMAT_ORDER_HAILO_NMS, // HAILO_FORMAT_ORDER_HAILO_NMS,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_RGB888,
+ HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NCHW,
+ HAILO_FORMAT_ORDER_YUY2, // HAILO_FORMAT_ORDER_YUY2,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV12,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV21,
+ HAILO_FORMAT_ORDER_NV12, // HAILO_FORMAT_ORDER_HAILO_YYUV,
+ HAILO_FORMAT_ORDER_NV21, // HAILO_FORMAT_ORDER_HAILO_YYVU,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_RGB4,
+ HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_I420,
+ HAILO_FORMAT_ORDER_I420 // HAILO_FORMAT_ORDER_HAILO_YYYYUV,
+};
+
+// This func must be aligned to SDK!
+Expected<hailo_format_order_t> HailoRTDefaults::get_device_format_order(uint32_t compiler_format_order)
+{
+ switch (compiler_format_order) {
+ case 0:
+ return std::move(HAILO_FORMAT_ORDER_NHWC);
+ break;
+ case 1:
+ return std::move(HAILO_FORMAT_ORDER_NHCW);
+ break;
+ case 2:
+ return std::move(HAILO_FORMAT_ORDER_NC);
+ break;
+ case 3:
+ return std::move(HAILO_FORMAT_ORDER_FCR);
+ break;
+ case 4:
+ return std::move(HAILO_FORMAT_ORDER_BAYER_RGB);
+ break;
+ case 5:
+ return std::move(HAILO_FORMAT_ORDER_NHW);
+ break;
+ case 6:
+ return std::move(HAILO_FORMAT_ORDER_HAILO_NMS);
+ break;
+ case 7:
+ return std::move(HAILO_FORMAT_ORDER_F8CR);
+ break;
+ case 8:
+ return std::move(HAILO_FORMAT_ORDER_RGB888);
+ break;
+ case 11:
+ return std::move(HAILO_FORMAT_ORDER_YUY2);
+ break;
+ case 13:
+ return std::move(HAILO_FORMAT_ORDER_NHWC);
+ break;
+ case 14:
+ return std::move(HAILO_FORMAT_ORDER_HAILO_YYUV);
+ break;
+ case 15:
+ return std::move(HAILO_FORMAT_ORDER_HAILO_YYVU);
+ break;
+ case 16:
+ return std::move(HAILO_FORMAT_ORDER_HAILO_YYYYUV);
+ break;
+ default:
+ LOGGER__ERROR("Invalid compiler_format_order ({})", compiler_format_order);
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+}
+
+hailo_format_order_t HailoRTDefaults::get_default_host_format_order(const hailo_format_t &device_format)
+{
+ const bool is_argmax = (0 != (device_format.flags & HAILO_FORMAT_FLAGS_HOST_ARGMAX));
+ if (!is_argmax) {
+ return DEFAULT_FORMAT_ORDER_MAP[device_format.order];
+ } else {
+ return DEFAULT_FORMAT_ARGMAX_ORDER_MAP[device_format.order];
+ }
+}
+
+struct sockaddr_in HailoRTDefaults::get_sockaddr()
+{
+ struct sockaddr_in address{};
+ address.sin_family = AF_INET;
+ address.sin_port = 0;
+ address.sin_addr.s_addr = INADDR_ANY;
+ // sin_zero is already zeroed
+ return address;
+}
+
+hailo_format_t HailoRTDefaults::get_user_buffer_format()
+{
+ return get_user_buffer_format(true, HAILO_FORMAT_TYPE_AUTO);
+}
+
+hailo_format_t HailoRTDefaults::get_user_buffer_format(bool quantized, hailo_format_type_t format_type)
+{
+ hailo_format_t user_buffer_format{};
+ user_buffer_format.type = format_type;
+ user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
+
+ hailo_format_flags_t flags = HAILO_FORMAT_FLAGS_NONE;
+ if (quantized) {
+ flags = static_cast<hailo_format_flags_t>(flags | HAILO_FORMAT_FLAGS_QUANTIZED);
+ }
+
+ user_buffer_format.flags = flags;
+ return user_buffer_format;
+}
+
+hailo_transform_params_t HailoRTDefaults::get_transform_params(bool quantized, hailo_format_type_t format_type)
+{
+ hailo_transform_params_t params{};
+ params.transform_mode = HAILO_STREAM_TRANSFORM_COPY;
+ params.user_buffer_format = get_user_buffer_format(quantized, format_type);
+ return params;
+}
+
+hailo_transform_params_t HailoRTDefaults::get_transform_params()
+{
+ return get_transform_params(true, HAILO_FORMAT_TYPE_AUTO);
+}
+
+hailo_vstream_params_t HailoRTDefaults::get_vstreams_params()
+{
+ return get_vstreams_params(true, HAILO_FORMAT_TYPE_AUTO);
+}
+
+hailo_vstream_params_t HailoRTDefaults::get_vstreams_params(bool quantized, hailo_format_type_t format_type)
+{
+ hailo_vstream_params_t params{};
+ params.user_buffer_format = get_user_buffer_format(quantized, format_type);
+ params.queue_size = HAILO_DEFAULT_VSTREAM_QUEUE_SIZE;
+ params.timeout_ms = HAILO_DEFAULT_VSTREAM_TIMEOUT_MS;
+ params.vstream_stats_flags = HAILO_VSTREAM_STATS_NONE;
+ params.pipeline_elements_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
+ return params;
+}
+
// Transform params derived from a stream's HW format: keep the HW element type,
// pick the default host order for the HW order, and request quantized buffers.
hailo_transform_params_t HailoRTDefaults::get_transform_params(const hailo_stream_info_t &stream_info)
{
    hailo_transform_params_t params{};
    params.transform_mode = HAILO_STREAM_TRANSFORM_COPY;
    params.user_buffer_format.type = stream_info.format.type;
    params.user_buffer_format.order = get_default_host_format_order(stream_info.format);
    // NOTE(review): assuming QUANTIZED and TRANSPOSED are distinct bits, this
    // expression reduces to plain HAILO_FORMAT_FLAGS_QUANTIZED (TRANSPOSED cleared).
    params.user_buffer_format.flags = static_cast<hailo_format_flags_t>(
        HAILO_FORMAT_FLAGS_QUANTIZED &
        ~HAILO_FORMAT_FLAGS_TRANSPOSED);
    return params;
}
+
+hailo_eth_input_stream_params_t HailoRTDefaults::get_eth_input_stream_params()
+{
+ hailo_eth_input_stream_params_t params{};
+ params.host_address = get_sockaddr();
+ params.device_port = HAILO_DEFAULT_ETH_DEVICE_PORT;
+ params.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
+ params.is_sync_enabled = false;
+ params.frames_per_sync = 0;
+ params.rate_limit_bytes_per_sec = 0;
+ params.buffers_threshold = HAILO_DEFAULT_BUFFERS_THRESHOLD;
+ return params;
+}
+
+hailo_eth_output_stream_params_t HailoRTDefaults::get_eth_output_stream_params()
+{
+ hailo_eth_output_stream_params_t params{};
+ params.host_address = get_sockaddr();
+ params.device_port = HAILO_DEFAULT_ETH_DEVICE_PORT;
+ params.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
+ params.is_sync_enabled = true;
+ return params;
+}
+
+hailo_pcie_input_stream_params_t HailoRTDefaults::get_pcie_input_stream_params()
+{
+ hailo_pcie_input_stream_params_t params{};
+ return params;
+}
+
+hailo_pcie_output_stream_params_t HailoRTDefaults::get_pcie_output_stream_params()
+{
+ hailo_pcie_output_stream_params_t params{};
+ return params;
+}
+
+hailo_integrated_input_stream_params_t HailoRTDefaults::get_integrated_input_stream_params()
+{
+ hailo_integrated_input_stream_params_t params{};
+ return params;
+}
+
+hailo_integrated_output_stream_params_t HailoRTDefaults::get_integrated_output_stream_params()
+{
+ hailo_integrated_output_stream_params_t params{};
+ return params;
+}
+
// Default MIPI input stream configuration: a RAW8 1080p stream on RX 0 with the
// ISP disabled (ISP defaults below are still filled in for when it is enabled).
hailo_mipi_input_stream_params_t HailoRTDefaults::get_mipi_input_stream_params()
{
    hailo_mipi_input_stream_params_t params = {};

    params.mipi_rx_id = 0;
    params.data_type = HAILO_MIPI_RX_TYPE_RAW_8;

    // Common MIPI link settings: 1920x1080, 2 lanes, automatic clock selection.
    params.mipi_common_params.img_width_pixels = 1920;
    params.mipi_common_params.img_height_pixels = 1080;
    params.mipi_common_params.pixels_per_clock = HAILO_MIPI_PIXELS_PER_CLOCK_4;
    params.mipi_common_params.number_of_lanes = 2;
    params.mipi_common_params.clock_selection = HAILO_MIPI_CLOCK_SELECTION_AUTOMATIC;
    params.mipi_common_params.data_rate = 260; // presumably Mbps per lane - TODO confirm units
    params.mipi_common_params.virtual_channel_index = 0;

    // ISP is off by default; values below take effect only when isp_enable is set.
    params.isp_enable = false;
    params.isp_params.isp_img_in_order = HAILO_MIPI_ISP_IMG_IN_ORDER_GR_FIRST;
    params.isp_params.isp_img_out_data_type = HAILO_MIPI_IMG_OUT_DATA_TYPE_RGB_888;
    params.isp_params.isp_crop_enable = false;
    params.isp_params.isp_crop_output_width_pixels = 1920;
    params.isp_params.isp_crop_output_height_pixels = 1080;
    params.isp_params.isp_crop_output_width_start_offset_pixels = 0;
    params.isp_params.isp_crop_output_height_start_offset_pixels = 0;
    params.isp_params.isp_test_pattern_enable = true;
    params.isp_params.isp_configuration_bypass = false;
    // Runtime auto-exposure/white-balance/defect-correction on; auto-focus off.
    params.isp_params.isp_run_time_ae_enable = true;
    params.isp_params.isp_run_time_awb_enable = true;
    params.isp_params.isp_run_time_adt_enable = true;
    params.isp_params.isp_run_time_af_enable = false;
    params.isp_params.isp_run_time_calculations_interval_ms = 0;
    params.isp_params.isp_light_frequency = HAILO_MIPI_ISP_LIGHT_FREQUENCY_50HZ;

    return params;
}
+
+Expected<hailo_stream_parameters_t> HailoRTDefaults::get_stream_parameters(hailo_stream_interface_t interface,
+ hailo_stream_direction_t direction)
+{
+ hailo_stream_parameters_t params = {};
+ params.stream_interface = interface;
+ params.direction = direction;
+ switch (params.stream_interface) {
+ case HAILO_STREAM_INTERFACE_PCIE:
+ if (HAILO_H2D_STREAM == direction) {
+ params.pcie_input_params = get_pcie_input_stream_params();
+ } else {
+ params.pcie_output_params = get_pcie_output_stream_params();
+ }
+ break;
+ case HAILO_STREAM_INTERFACE_INTEGRATED:
+ if (HAILO_H2D_STREAM == direction) {
+ params.integrated_input_params = get_integrated_input_stream_params();
+ } else {
+ params.integrated_output_params = get_integrated_output_stream_params();
+ }
+ break;
+ case HAILO_STREAM_INTERFACE_ETH:
+ if (HAILO_H2D_STREAM == direction) {
+ params.eth_input_params = get_eth_input_stream_params();
+ } else {
+ params.eth_output_params = get_eth_output_stream_params();
+ }
+ break;
+ case HAILO_STREAM_INTERFACE_MIPI:
+ if (HAILO_H2D_STREAM == direction) {
+ params.mipi_input_params = get_mipi_input_stream_params();
+ break;
+ } else {
+ LOGGER__ERROR("Invalid stream interface");
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+ }
+ default:
+ LOGGER__ERROR("Invalid stream interface");
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+ }
+ return params;
+}
+
+hailo_activate_network_group_params_t HailoRTDefaults::get_active_network_group_params()
+{
+ hailo_activate_network_group_params_t params = {};
+ return params;
+}
+
+ConfigureNetworkParams HailoRTDefaults::get_configure_params(uint16_t batch_size, hailo_power_mode_t power_mode)
+{
+ ConfigureNetworkParams params = {};
+ params.batch_size = batch_size;
+ if (std::getenv("FORCE_POWER_MODE_ULTRA_PERFORMANCE") != nullptr) {
+ power_mode = HAILO_POWER_MODE_ULTRA_PERFORMANCE;
+ }
+ params.power_mode = power_mode;
+ params.latency = HAILO_LATENCY_NONE;
+ return params;
+}
+
+hailo_network_parameters_t HailoRTDefaults::get_network_parameters(uint16_t batch_size)
+{
+ hailo_network_parameters_t params = {};
+ params.batch_size = batch_size;
+
+ return params;
+}
+
+std::string HailoRTDefaults::get_network_name(const std::string &net_group_name)
+{
+ std::string default_network_name = net_group_name +
+ HAILO_DEFAULT_NETWORK_NAME_QUALIFIER +
+ net_group_name;
+
+ return default_network_name;
+}
+
+hailo_format_t HailoRTDefaults::expand_auto_format(const hailo_format_t &host_format, const hailo_format_t &hw_format)
+{
+ auto host_format_copy = host_format;
+ if (HAILO_FORMAT_TYPE_AUTO == host_format_copy.type) {
+ host_format_copy.type = hw_format.type;
+ }
+ if (HAILO_FORMAT_ORDER_AUTO == host_format_copy.order) {
+ host_format_copy.order = get_default_host_format_order(hw_format);
+ }
+ return host_format_copy;
+}
+
+hailo_vdevice_params_t HailoRTDefaults::get_vdevice_params()
+{
+ hailo_vdevice_params_t params = {};
+ params.device_count = HAILO_DEFAULT_DEVICE_COUNT;
+ params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN;
+ params.device_ids = nullptr;
+ params.group_id = HAILO_DEFAULT_VDEVICE_GROUP_ID;
+ params.multi_process_service = false;
+ return params;
+}
+
+} /* namespace hailort */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_defaults.hpp
- * @brief
- **/
-#ifndef HAILORT_DEFAULTS_HPP_
-#define HAILORT_DEFAULTS_HPP_
-
-#include <hailo/hailort.h>
-#include "hailo/expected.hpp"
-#include "hailo/network_group.hpp"
-#include "hailo/hef.hpp"
-#include "common/logger_macros.hpp"
-#include "common/utils.hpp"
-
-namespace hailort
-{
-
// Lookup table: index = device-side hailo_format_order_t value, entry = the
// default host/user format order chosen for that device order.
// NOTE(review): indexing assumes hailo_format_order_t enumerators are contiguous
// from 0 and match this table's row order - verify against the enum definition.
constexpr hailo_format_order_t DEFAULT_FORMAT_ORDER_MAP[] = {
    // Key is device_format_order, value is default user_format_order
    HAILO_FORMAT_ORDER_AUTO, // HAILO_FORMAT_ORDER_AUTO, - Should not be used!
    HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_NHWC,
    HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_NHCW,
    HAILO_FORMAT_ORDER_FCR, // HAILO_FORMAT_ORDER_FCR,
    HAILO_FORMAT_ORDER_F8CR, // HAILO_FORMAT_ORDER_F8CR,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHW,
    HAILO_FORMAT_ORDER_NC, // HAILO_FORMAT_ORDER_NC,
    HAILO_FORMAT_ORDER_BAYER_RGB, // HAILO_FORMAT_ORDER_BAYER_RGB,
    HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB, // HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB
    HAILO_FORMAT_ORDER_HAILO_NMS, // HAILO_FORMAT_ORDER_HAILO_NMS,
    HAILO_FORMAT_ORDER_NHWC, // HAILO_FORMAT_ORDER_RGB888,
    HAILO_FORMAT_ORDER_NCHW, // HAILO_FORMAT_ORDER_NCHW,
    HAILO_FORMAT_ORDER_YUY2, // HAILO_FORMAT_ORDER_YUY2,
    HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV12,
    HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV21,
    HAILO_FORMAT_ORDER_NV12, // HAILO_FORMAT_ORDER_HAILO_YYUV,
    HAILO_FORMAT_ORDER_NV21, // HAILO_FORMAT_ORDER_HAILO_YYVU,
    HAILO_FORMAT_ORDER_MAX_ENUM // Not used in device side - HAILO_FORMAT_ORDER_RGB4
    };
-
// Same mapping as DEFAULT_FORMAT_ORDER_MAP but used when the stream performs an
// on-host argmax (the channel axis is collapsed, so most orders reduce to NHW).
// NOTE(review): same contiguous-enum indexing assumption as the table above.
constexpr hailo_format_order_t DEFAULT_FORMAT_ARGMAX_ORDER_MAP[] = {
    // Key is device_format_order, value is default user_format_order
    HAILO_FORMAT_ORDER_AUTO, // HAILO_FORMAT_ORDER_AUTO, - Should not be used!
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHWC,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHCW,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_FCR,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_F8CR,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NHW,
    HAILO_FORMAT_ORDER_NC, // HAILO_FORMAT_ORDER_NC,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_BAYER_RGB,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB
    HAILO_FORMAT_ORDER_HAILO_NMS, // HAILO_FORMAT_ORDER_HAILO_NMS,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_RGB888,
    HAILO_FORMAT_ORDER_NHW, // HAILO_FORMAT_ORDER_NCHW,
    HAILO_FORMAT_ORDER_YUY2, // HAILO_FORMAT_ORDER_YUY2,
    HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV12,
    HAILO_FORMAT_ORDER_MAX_ENUM, // Not used in device side - HAILO_FORMAT_ORDER_NV21,
    HAILO_FORMAT_ORDER_NV12, // HAILO_FORMAT_ORDER_HAILO_YYUV,
    HAILO_FORMAT_ORDER_NV21, // HAILO_FORMAT_ORDER_HAILO_YYVU,
    HAILO_FORMAT_ORDER_MAX_ENUM // Not used in device side - HAILO_FORMAT_ORDER_RGB4
};
-
-
-#define HAILO_DEFAULT_NETWORK_NAME_QUALIFIER (std::string("/"))
-
-
-class HailoRTDefaults
-{
-public:
- HailoRTDefaults() = delete;
-
- // This func must be aligned to SDK!
- static Expected<hailo_format_order_t> get_device_format_order(uint32_t compiler_format_order)
- {
- switch (compiler_format_order) {
- case 0:
- return std::move(HAILO_FORMAT_ORDER_NHWC);
- break;
- case 1:
- return std::move(HAILO_FORMAT_ORDER_NHCW);
- break;
- case 2:
- return std::move(HAILO_FORMAT_ORDER_NC);
- break;
- case 3:
- return std::move(HAILO_FORMAT_ORDER_FCR);
- break;
- case 4:
- return std::move(HAILO_FORMAT_ORDER_BAYER_RGB);
- break;
- case 5:
- return std::move(HAILO_FORMAT_ORDER_NHW);
- break;
- case 6:
- return std::move(HAILO_FORMAT_ORDER_HAILO_NMS);
- break;
- case 7:
- return std::move(HAILO_FORMAT_ORDER_F8CR);
- break;
- case 8:
- return std::move(HAILO_FORMAT_ORDER_RGB888);
- break;
- case 11:
- return std::move(HAILO_FORMAT_ORDER_YUY2);
- break;
- case 14:
- return std::move(HAILO_FORMAT_ORDER_HAILO_YYUV);
- break;
- case 15:
- return std::move(HAILO_FORMAT_ORDER_HAILO_YYVU);
- break;
- default:
- LOGGER__ERROR("Invalid compiler_format_order ({})", compiler_format_order);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
- }
-
- static constexpr hailo_format_order_t get_default_host_format_order(const hailo_format_t &device_format)
- {
- const bool is_argmax = (0 != (device_format.flags & HAILO_FORMAT_FLAGS_HOST_ARGMAX));
- if (!is_argmax) {
- return DEFAULT_FORMAT_ORDER_MAP[device_format.order];
- } else {
- return DEFAULT_FORMAT_ARGMAX_ORDER_MAP[device_format.order];
- }
- }
-
- static constexpr struct sockaddr_in get_sockaddr() {
- struct sockaddr_in address{};
- address.sin_family = AF_INET;
- address.sin_port = 0;
- address.sin_addr.s_addr = INADDR_ANY;
- // sin_zero is already zeroed
- return address;
- }
-
- static constexpr hailo_format_t get_user_buffer_format()
- {
- return get_user_buffer_format(true, HAILO_FORMAT_TYPE_AUTO);
- }
-
- static constexpr hailo_format_t get_user_buffer_format(bool quantized, hailo_format_type_t format_type)
- {
- hailo_format_t user_buffer_format{};
- user_buffer_format.type = format_type;
- user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
-
- hailo_format_flags_t flags = HAILO_FORMAT_FLAGS_NONE;
- if (quantized) {
- flags = static_cast<hailo_format_flags_t>(flags | HAILO_FORMAT_FLAGS_QUANTIZED);
- }
-
- user_buffer_format.flags = flags;
- return user_buffer_format;
- }
-
- static constexpr hailo_transform_params_t get_transform_params(bool quantized, hailo_format_type_t format_type)
- {
- hailo_transform_params_t params{};
- params.transform_mode = HAILO_STREAM_TRANSFORM_COPY;
- params.user_buffer_format = get_user_buffer_format(quantized, format_type);
- return params;
- }
-
- static constexpr hailo_transform_params_t get_transform_params()
- {
- return get_transform_params(true, HAILO_FORMAT_TYPE_AUTO);
- }
-
- static constexpr hailo_vstream_params_t get_vstreams_params()
- {
- return get_vstreams_params(true, HAILO_FORMAT_TYPE_AUTO);
- }
-
- static constexpr hailo_vstream_params_t get_vstreams_params(bool quantized, hailo_format_type_t format_type)
- {
- hailo_vstream_params_t params{};
- params.user_buffer_format = get_user_buffer_format(quantized, format_type);
- params.queue_size = HAILO_DEFAULT_VSTREAM_QUEUE_SIZE;
- params.timeout_ms = HAILO_DEFAULT_VSTREAM_TIMEOUT_MS;
- params.vstream_stats_flags = HAILO_VSTREAM_STATS_NONE;
- params.pipeline_elements_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
- return params;
- }
-
- static constexpr hailo_transform_params_t get_transform_params(const hailo_stream_info_t &stream_info)
- {
- hailo_transform_params_t params{};
- params.transform_mode = HAILO_STREAM_TRANSFORM_COPY;
- params.user_buffer_format.type = stream_info.format.type;
- params.user_buffer_format.order = get_default_host_format_order(stream_info.format);
- params.user_buffer_format.flags = static_cast<hailo_format_flags_t>(
- HAILO_FORMAT_FLAGS_QUANTIZED &
- ~HAILO_FORMAT_FLAGS_TRANSPOSED);
- return params;
- }
-
- static constexpr hailo_eth_input_stream_params_t get_eth_input_stream_params() {
- hailo_eth_input_stream_params_t params{};
- params.host_address = get_sockaddr();
- params.device_port = HAILO_DEFAULT_ETH_DEVICE_PORT;
- params.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
- params.is_sync_enabled = false;
- params.frames_per_sync = 0;
- params.rate_limit_bytes_per_sec = 0;
- params.buffers_threshold = HAILO_DEFAULT_BUFFERS_THRESHOLD;
- return params;
- }
-
- static constexpr hailo_eth_output_stream_params_t get_eth_output_stream_params() {
- hailo_eth_output_stream_params_t params{};
- params.host_address = get_sockaddr();
- params.device_port = HAILO_DEFAULT_ETH_DEVICE_PORT;
- params.max_payload_size = HAILO_DEFAULT_ETH_MAX_PAYLOAD_SIZE;
- params.is_sync_enabled = true;
- return params;
- }
-
-
- static constexpr hailo_pcie_input_stream_params_t get_pcie_input_stream_params() {
- hailo_pcie_input_stream_params_t params{};
- return params;
- }
-
- static constexpr hailo_pcie_output_stream_params_t get_pcie_output_stream_params() {
- hailo_pcie_output_stream_params_t params{};
- return params;
- }
-
- static constexpr hailo_core_input_stream_params_t get_core_input_stream_params() {
- hailo_core_input_stream_params_t params{};
- return params;
- }
-
- static constexpr hailo_core_output_stream_params_t get_core_output_stream_params() {
- hailo_core_output_stream_params_t params{};
- return params;
- }
-
- static constexpr hailo_mipi_input_stream_params_t get_mipi_input_stream_params()
- {
- hailo_mipi_input_stream_params_t params = {};
-
- params.mipi_rx_id = 0;
- params.data_type = HAILO_MIPI_RX_TYPE_RAW_8;
-
- params.mipi_common_params.img_width_pixels = 1920;
- params.mipi_common_params.img_height_pixels = 1080;
- params.mipi_common_params.pixels_per_clock = HAILO_MIPI_PIXELS_PER_CLOCK_4;
- params.mipi_common_params.number_of_lanes = 2;
- params.mipi_common_params.clock_selection = HAILO_MIPI_CLOCK_SELECTION_AUTOMATIC;
- params.mipi_common_params.data_rate = 260;
- params.mipi_common_params.virtual_channel_index = 0;
-
- params.isp_enable = false;
- params.isp_params.isp_img_in_order = HAILO_MIPI_ISP_IMG_IN_ORDER_GR_FIRST;
- params.isp_params.isp_img_out_data_type = HAILO_MIPI_IMG_OUT_DATA_TYPE_RGB_888;
- params.isp_params.isp_crop_enable = false;
- params.isp_params.isp_crop_output_width_pixels = 1920;
- params.isp_params.isp_crop_output_height_pixels = 1080;
- params.isp_params.isp_crop_output_width_start_offset_pixels = 0;
- params.isp_params.isp_crop_output_height_start_offset_pixels = 0;
- params.isp_params.isp_test_pattern_enable = true;
- params.isp_params.isp_configuration_bypass = false;
- params.isp_params.isp_run_time_ae_enable = true;
- params.isp_params.isp_run_time_awb_enable = true;
- params.isp_params.isp_run_time_adt_enable = true;
- params.isp_params.isp_run_time_af_enable = false;
- params.isp_params.isp_run_time_calculations_interval_ms = 0;
- params.isp_params.isp_light_frequency = HAILO_MIPI_ISP_LIGHT_FREQUENCY_50HZ;
-
- return params;
- }
-
- static Expected<hailo_stream_parameters_t> get_stream_parameters(hailo_stream_interface_t interface,
- hailo_stream_direction_t direction)
- {
- hailo_stream_parameters_t params = {};
- params.stream_interface = interface;
- params.direction = direction;
- switch (params.stream_interface) {
- case HAILO_STREAM_INTERFACE_PCIE:
- if (HAILO_H2D_STREAM == direction) {
- params.pcie_input_params = get_pcie_input_stream_params();
- } else {
- params.pcie_output_params = get_pcie_output_stream_params();
- }
- break;
- case HAILO_STREAM_INTERFACE_CORE:
- if (HAILO_H2D_STREAM == direction) {
- params.core_input_params = get_core_input_stream_params();
- } else {
- params.core_output_params = get_core_output_stream_params();
- }
- break;
- case HAILO_STREAM_INTERFACE_ETH:
- if (HAILO_H2D_STREAM == direction) {
- params.eth_input_params = get_eth_input_stream_params();
- } else {
- params.eth_output_params = get_eth_output_stream_params();
- }
- break;
- default:
- LOGGER__ERROR("Invalid stream interface");
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
- return params;
- }
-
- static hailo_activate_network_group_params_t get_network_group_params()
- {
- hailo_activate_network_group_params_t params = {};
- return params;
- }
-
- static ConfigureNetworkParams get_configure_params(uint16_t batch_size = HAILO_DEFAULT_BATCH_SIZE,
- hailo_power_mode_t power_mode = HAILO_POWER_MODE_PERFORMANCE)
- {
- ConfigureNetworkParams params = {};
- params.batch_size = batch_size;
- if (std::getenv("FORCE_POWER_MODE_ULTRA_PERFORMANCE") != nullptr) {
- power_mode = HAILO_POWER_MODE_ULTRA_PERFORMANCE;
- }
- params.power_mode = power_mode;
- params.latency = HAILO_LATENCY_NONE;
- return params;
- }
-
- static hailo_network_parameters_t get_network_parameters(uint16_t batch_size = HAILO_DEFAULT_BATCH_SIZE)
- {
- hailo_network_parameters_t params = {};
- params.batch_size = batch_size;
-
- return params;
- }
-
- static std::string get_network_name(const std::string &net_group_name)
- {
- std::string default_network_name = net_group_name +
- HAILO_DEFAULT_NETWORK_NAME_QUALIFIER +
- net_group_name;
-
- return default_network_name;
- }
-
- static hailo_format_t expand_auto_format(const hailo_format_t &host_format, const hailo_format_t &hw_format)
- {
- auto host_format_copy = host_format;
- if (HAILO_FORMAT_TYPE_AUTO == host_format_copy.type) {
- host_format_copy.type = hw_format.type;
- }
- if (HAILO_FORMAT_ORDER_AUTO == host_format_copy.order) {
- host_format_copy.order = get_default_host_format_order(hw_format);
- }
- return host_format_copy;
- }
-
- static hailo_vdevice_params_t get_vdevice_params()
- {
- hailo_vdevice_params_t params = {};
- params.device_count = HAILO_DEFAULT_DEVICE_COUNT;
- params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN;
- params.device_ids = nullptr;
- params.group_id = HAILO_DEFAULT_VDEVICE_GROUP_ID;
- params.multi_process_service = false;
- return params;
- }
-};
-
-} /* namespace hailort */
-
-#endif /* HAILORT_DEFAULTS_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_logger.cpp
- * @brief Implements logger used by hailort.
- **/
-
-#include "hailort_logger.hpp"
-#include "common/utils.hpp"
-#include "common/filesystem.hpp"
-
-#include <spdlog/sinks/basic_file_sink.h>
-#include <spdlog/sinks/rotating_file_sink.h>
-#include <spdlog/sinks/stdout_color_sinks.h>
-#include <spdlog/sinks/android_sink.h>
-#include <spdlog/sinks/null_sink.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <iostream>
-#include <iomanip>
-#ifdef _WIN32
-#include <shlwapi.h>
-#include <shlobj.h>
-#endif
-
-namespace hailort
-{
-
-#define MAX_LOG_FILE_SIZE (1024 * 1024) // 1MB
-
-#define HAILORT_NAME ("HailoRT")
-#define HAILORT_LOGGER_FILENAME ("hailort.log")
-#define HAILORT_MAX_NUMBER_OF_LOG_FILES (1) // There will be 2 log files - 1 spare
-#define HAILORT_CONSOLE_LOGGER_PATTERN ("[%n] [%^%l%$] %v") // Console logger will print: [hailort logger file name] [log level] msg
-#define HAILORT_MAIN_FILE_LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%P] [%n] [%l] [%s:%#] [%!] %v") //File logger will print: [timestamp] [PID] [hailort] [log level] [source file:line number] [function name] msg
-#define HAILORT_LOCAL_FILE_LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%n] [%l] [%s:%#] [%!] %v") //File logger will print: [timestamp] [hailort] [log level] [source file:line number] [function name] msg
-#define HAILORT_ANDROID_LOGGER_PATTERN ("%v") // Android logger will print only message (additional info are built-in)
-
-#define HAILORT_LOGGER_PATH_ENV_VAR ("HAILORT_LOGGER_PATH")
-
-#ifdef _WIN32
-#define PATH_SEPARATOR "\\"
-#else
-#define PATH_SEPARATOR "/"
-#endif
-
-std::string HailoRTLogger::parse_log_path(const char *log_path)
-{
- if ((nullptr == log_path) || (std::strlen(log_path) == 0)) {
- return ".";
- }
-
- std::string log_path_str(log_path);
- if (log_path_str == "NONE") {
- return "";
- }
-
- return log_path_str;
-}
-
// Reads the log directory from the given environment variable and normalizes
// it via parse_log_path ("." when unset/empty, "" when set to "NONE").
std::string HailoRTLogger::get_log_path(const std::string &path_env_var)
{
    auto log_path_c_str = std::getenv(path_env_var.c_str());
    return parse_log_path(log_path_c_str);
}
-
// Resolves the directory for the centralized ("main") log file:
//   - "" when file logging is disabled via HAILORT_LOGGER_PATH=NONE,
//   - %LOCALAPPDATA%\Hailo\HailoRT on Windows, ~/.hailo/hailort elsewhere.
// Creates the directories on demand; returns "" (and reports to stderr) on failure.
std::string HailoRTLogger::get_main_log_path()
{
    std::string local_log_path = get_log_path(HAILORT_LOGGER_PATH_ENV_VAR);
    if (local_log_path.length() == 0) {
        // The user explicitly disabled file logging ("NONE").
        return "";
    }

#ifdef _WIN32
    // See https://stackoverflow.com/questions/2899013/how-do-i-get-the-application-data-path-in-windows-using-c
    TCHAR local_app_data_path[MAX_PATH];
    auto result = SHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, local_app_data_path);
    if (!SUCCEEDED(result)) {
        std::cerr << "Cannot resolve Local Application Data directory path" << std::endl;
        return "";
    }

    const auto hailo_dir_path = std::string(local_app_data_path) + PATH_SEPARATOR + "Hailo";
    const auto full_path = hailo_dir_path + PATH_SEPARATOR + "HailoRT";
#else
    const auto hailo_dir_path = Filesystem::get_home_directory() + PATH_SEPARATOR + ".hailo";
    const auto full_path = hailo_dir_path + PATH_SEPARATOR + "hailort";
#endif

    // Create parent then child; both calls are expected to succeed if the
    // directory already exists.
    auto status = Filesystem::create_directory(hailo_dir_path);
    if (HAILO_SUCCESS != status) {
        std::cerr << "Cannot create directory at path " << hailo_dir_path << std::endl;
        return "";
    }

    status = Filesystem::create_directory(full_path);
    if (HAILO_SUCCESS != status) {
        std::cerr << "Cannot create directory at path " << full_path << std::endl;
        return "";
    }

    return full_path;
}
-
// Creates a spdlog file sink in dir_path (rotating when 'rotate' is set, plain
// otherwise). Falls back to a null sink - silently discarding log records -
// when dir_path is "" (logging disabled) or not writable.
std::shared_ptr<spdlog::sinks::sink> HailoRTLogger::create_file_sink(const std::string &dir_path, const std::string &filename, bool rotate)
{
    if ("" == dir_path) {
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    if (!Filesystem::is_path_accesible(dir_path)) {
        std::cerr << "HailoRT warning: Cannot create log file " << filename
                  << "! Please check the directory " << dir_path << " write permissions." << std::endl;
        // Create null sink instead (Will throw away its log)
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    const auto file_path = dir_path + PATH_SEPARATOR + filename;
    if (Filesystem::does_file_exists(file_path) && !Filesystem::is_path_accesible(file_path)) {
        std::cerr << "HailoRT warning: Cannot create log file " << filename
                  << "! Please check the file " << file_path << " write permissions." << std::endl;
        // Create null sink instead (Will throw away its log)
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    if (rotate) {
        // Rotates at MAX_LOG_FILE_SIZE, keeping HAILORT_MAX_NUMBER_OF_LOG_FILES spares.
        return make_shared_nothrow<spdlog::sinks::rotating_file_sink_mt>(file_path, MAX_LOG_FILE_SIZE, HAILORT_MAX_NUMBER_OF_LOG_FILES);
    }

    return make_shared_nothrow<spdlog::sinks::basic_file_sink_mt>(file_path);
}
-
// Constructs the singleton logger: a stderr console sink plus a centralized
// ("main") file sink and a local/user-directed file sink (Android builds route
// the main sink to logcat instead).
// NOTE(review): on __ANDROID__ the init-list never initializes
// m_local_log_file_sink, yet sink_list below still references it - that looks
// like a null sink pointer handed to spdlog; verify on an Android build.
HailoRTLogger::HailoRTLogger() :
    m_console_sink(make_shared_nothrow<spdlog::sinks::stderr_color_sink_mt>()),
#ifdef __ANDROID__
    m_main_log_file_sink(make_shared_nothrow<spdlog::sinks::android_sink_mt>(HAILORT_NAME))
#else
    m_main_log_file_sink(create_file_sink(get_main_log_path(), HAILORT_LOGGER_FILENAME, true)),
    m_local_log_file_sink(create_file_sink(get_log_path(HAILORT_LOGGER_PATH_ENV_VAR), HAILORT_LOGGER_FILENAME, true))
#endif
{

#ifdef __ANDROID__
    m_main_log_file_sink->set_pattern(HAILORT_ANDROID_LOGGER_PATTERN);
#else
    m_main_log_file_sink->set_pattern(HAILORT_MAIN_FILE_LOGGER_PATTERN);
    m_local_log_file_sink->set_pattern(HAILORT_LOCAL_FILE_LOGGER_PATTERN);
#endif

    // TODO: Handle null pointers for logger and sinks
    m_console_sink->set_pattern(HAILORT_CONSOLE_LOGGER_PATTERN);
    spdlog::sinks_init_list sink_list = { m_console_sink, m_main_log_file_sink, m_local_log_file_sink };
    m_hailort_logger = make_shared_nothrow<spdlog::logger>(HAILORT_NAME, sink_list.begin(), sink_list.end());

    // Release builds log less to file (info) than debug builds (debug).
#ifdef NDEBUG
    set_levels(spdlog::level::warn, spdlog::level::info, spdlog::level::warn);
#else
    set_levels(spdlog::level::warn, spdlog::level::debug, spdlog::level::debug);
#endif
    // Make this logger the spdlog process-wide default.
    spdlog::set_default_logger(m_hailort_logger);
}
-
// Returns the underlying spdlog logger (may be nullptr if make_shared_nothrow
// failed during construction).
std::shared_ptr<spdlog::logger> HailoRTLogger::logger()
{
    return m_hailort_logger;
}
-
// Applies levels: the console sink gets console_level, both file sinks get
// file_level, and the logger flushes on records at/above flush_level.
// NOTE(review): members are dereferenced without null checks - see the TODO in
// the constructor about nothrow allocation failures.
void HailoRTLogger::set_levels(spdlog::level::level_enum console_level,
    spdlog::level::level_enum file_level, spdlog::level::level_enum flush_level)
{
    m_console_sink->set_level(console_level);
    m_main_log_file_sink->set_level(file_level);
    m_local_log_file_sink->set_level(file_level);
    m_hailort_logger->flush_on(flush_level);
}
-
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file logger_macros.hpp
- * @brief Declares logger used by hailort.
- **/
-
-#ifndef _HAILORT_LOGGER_HPP_
-#define _HAILORT_LOGGER_HPP_
-
-
-#include <string.h>
-#include <stdint.h>
-#include <ctype.h>
-
-#include "hailo/hailort.h"
-#include "common/logger_macros.hpp"
-
-namespace hailort
-{
-
// Process-wide singleton wrapper around spdlog: owns a console (stderr) sink
// and two file sinks (centralized "main" log and a local/user-directed log).
class HailoRTLogger {
public:
    // Meyers singleton - thread-safe initialization since C++11.
    static HailoRTLogger& get_instance()
    {
        static HailoRTLogger instance;
        return instance;
    }
    // Non-copyable (singleton).
    HailoRTLogger(HailoRTLogger const&) = delete;
    void operator=(HailoRTLogger const&) = delete;

    // Underlying spdlog logger (also registered as spdlog's default logger).
    std::shared_ptr<spdlog::logger> logger();
    // Sets per-sink log levels and the flush threshold.
    void set_levels(spdlog::level::level_enum console_level, spdlog::level::level_enum file_level,
        spdlog::level::level_enum flush_level);
    // Resolves the log directory from an environment variable ("" disables file logging).
    static std::string get_log_path(const std::string &path_env_var);
    // Centralized log directory (home / local-appdata based); created on demand.
    static std::string get_main_log_path();
    // Creates a (rotating) file sink, or a null sink when dir_path is "" or unwritable.
    static std::shared_ptr<spdlog::sinks::sink> create_file_sink(const std::string &dir_path, const std::string &filename, bool rotate);

private:
    HailoRTLogger();
    // Normalizes a raw env-var value (nullptr/"" -> ".", "NONE" -> "").
    static std::string parse_log_path(const char *log_path);

    std::shared_ptr<spdlog::sinks::sink> m_console_sink;

    // The main log will written to a centralized directory (home directory)
    // The local log will be written to the local directory or to the path the user has chosen (via $HAILORT_LOGGER_PATH)
    std::shared_ptr<spdlog::sinks::sink> m_main_log_file_sink;
    std::shared_ptr<spdlog::sinks::sink> m_local_log_file_sink;
    std::shared_ptr<spdlog::logger> m_hailort_logger;
};
-
-
-} /* namespace hailort */
-
-#endif /* _HAILORT_LOGGER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_rpc_client.cpp
- * @brief Implementation of the hailort rpc client
- **/
-
-#include "hailort_rpc_client.hpp"
-#include "common/utils.hpp"
-#include "hef_internal.hpp"
-
-#include <grpcpp/health_check_service_interface.h>
-
-
-namespace hailort
-{
-
// Sends a keep-alive ping for the given client process id so the service keeps
// this client's server-side resources alive. Returns HAILO_SUCCESS, or an RPC
// error status when the call itself fails.
hailo_status HailoRtRpcClient::client_keep_alive(uint32_t process_id)
{
    keepalive_Request request;
    request.set_process_id(process_id);
    empty reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->client_keep_alive(&context, request, &reply);
    CHECK_GRPC_STATUS(status);
    return HAILO_SUCCESS;
}
-
// Queries the multi-process service for its version (used for client/service
// compatibility checks). Fails on RPC transport errors or a non-success
// service-side status.
Expected<hailo_version_t> HailoRtRpcClient::get_service_version()
{
    get_service_version_Request request;
    get_service_version_Reply reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->get_service_version(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
    auto version_proto = reply.hailo_version();
    hailo_version_t service_version = {version_proto.major_version(), version_proto.minor_version(), version_proto.revision_version()};
    return service_version;
}
-
// Asks the service to create a VDevice on behalf of process `pid`, serializing
// the vdevice params into the request, and returns the remote VDevice handle.
Expected<uint32_t> HailoRtRpcClient::VDevice_create(const hailo_vdevice_params_t &params, uint32_t pid) {
    VDevice_create_Request request;
    request.set_pid(pid);
    auto proto_vdevice_params = request.mutable_hailo_vdevice_params();
    proto_vdevice_params->set_device_count(params.device_count);
    auto ids = proto_vdevice_params->mutable_device_ids();
    if (params.device_ids != nullptr) {
        // Assumes device_ids points to at least device_count entries - TODO confirm caller contract.
        for (size_t i = 0; i < params.device_count; ++i) {
            ids->Add(std::string(params.device_ids[i].id));
        }
    }
    proto_vdevice_params->set_scheduling_algorithm(params.scheduling_algorithm);
    // A null group_id is serialized as the empty string.
    proto_vdevice_params->set_group_id(params.group_id == nullptr ? "" : std::string(params.group_id));

    VDevice_create_Reply reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->VDevice_create(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
    return reply.handle();
}
-
// Releases a service-side VDevice identified by its remote handle.
hailo_status HailoRtRpcClient::VDevice_release(uint32_t handle)
{
    Release_Request request;
    request.set_handle(handle);

    Release_Reply reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->VDevice_release(&context, request, &reply);
    CHECK_GRPC_STATUS(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
    return HAILO_SUCCESS;
}
-
-Expected<std::vector<uint32_t>> HailoRtRpcClient::InputVStreams_create(uint32_t net_group_handle,
- const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid)
-{
- VStream_create_Request request;
- request.set_net_group(net_group_handle);
- request.set_pid(pid);
- auto proto_vstreams_params = request.mutable_vstreams_params();
- for (const auto &name_params_pair : inputs_params) {
- ProtoNamedVStreamParams proto_name_param_pair;
- auto vstream_params = name_params_pair.second;
-
- proto_name_param_pair.set_name(name_params_pair.first);
- auto proto_vstream_param = proto_name_param_pair.mutable_params();
-
- auto proto_user_buffer_format = proto_vstream_param->mutable_user_buffer_format();
- auto user_buffer_format = vstream_params.user_buffer_format;
- proto_user_buffer_format->set_type(user_buffer_format.type);
- proto_user_buffer_format->set_order(user_buffer_format.order);
- proto_user_buffer_format->set_flags(user_buffer_format.flags);
-
- proto_vstream_param->set_timeout_ms(vstream_params.timeout_ms);
- proto_vstream_param->set_queue_size(vstream_params.queue_size);
-
- proto_vstream_param->set_vstream_stats_flags(vstream_params.vstream_stats_flags);
- proto_vstream_param->set_pipeline_elements_stats_flags(vstream_params.vstream_stats_flags);
-
- proto_vstreams_params->Add(std::move(proto_name_param_pair));
- }
-
- VStreams_create_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->InputVStreams_create(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::vector<uint32_t> input_vstreams_handles;
- input_vstreams_handles.reserve(reply.handles_size());
- for (auto &handle : *reply.mutable_handles()) {
- input_vstreams_handles.push_back(handle);
- }
- return input_vstreams_handles;
-}
-
-hailo_status HailoRtRpcClient::InputVStream_release(uint32_t handle)
-{
- Release_Request request;
- request.set_handle(handle);
-
- Release_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->InputVStream_release(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
- return HAILO_SUCCESS;
-}
-
-Expected<std::vector<uint32_t>> HailoRtRpcClient::OutputVStreams_create(uint32_t net_group_handle,
- const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid)
-{
- VStream_create_Request request;
- request.set_net_group(net_group_handle);
- request.set_pid(pid);
- auto proto_vstreams_params = request.mutable_vstreams_params();
- for (const auto &name_params_pair : output_params) {
- ProtoNamedVStreamParams proto_name_param_pair;
- auto vstream_params = name_params_pair.second;
-
- proto_name_param_pair.set_name(name_params_pair.first);
- auto proto_vstream_param = proto_name_param_pair.mutable_params();
-
- auto proto_user_buffer_format = proto_vstream_param->mutable_user_buffer_format();
- auto user_buffer_format = vstream_params.user_buffer_format;
- proto_user_buffer_format->set_type(user_buffer_format.type);
- proto_user_buffer_format->set_order(user_buffer_format.order);
- proto_user_buffer_format->set_flags(user_buffer_format.flags);
-
- proto_vstream_param->set_timeout_ms(vstream_params.timeout_ms);
- proto_vstream_param->set_queue_size(vstream_params.queue_size);
-
- proto_vstream_param->set_vstream_stats_flags(vstream_params.vstream_stats_flags);
- proto_vstream_param->set_pipeline_elements_stats_flags(vstream_params.vstream_stats_flags);
-
- proto_vstreams_params->Add(std::move(proto_name_param_pair));
- }
-
- VStreams_create_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->OutputVStreams_create(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::vector<uint32_t> output_vstreams_handles;
- output_vstreams_handles.reserve(reply.handles_size());
- for (auto &handle : *reply.mutable_handles()) {
- output_vstreams_handles.push_back(handle);
- }
- return output_vstreams_handles;
-}
-
-hailo_status HailoRtRpcClient::OutputVStream_release(uint32_t handle)
-{
- Release_Request request;
- request.set_handle(handle);
-
- Release_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->OutputVStream_release(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
- return HAILO_SUCCESS;
-}
-
// Configures an HEF on the service-side VDevice.
// Serializes the whole HEF buffer plus the (optional) per-network-group
// configure params, and returns one handle per configured network group.
// NOTE(review): the HEF is copied into the request, so large models mean a
// correspondingly large gRPC message - confirm the channel's message limits.
Expected<std::vector<uint32_t>> HailoRtRpcClient::VDevice_configure(uint32_t vdevice_handle, const Hef &hef,
    uint32_t pid, const NetworkGroupsParamsMap &configure_params)
{
    VDevice_configure_Request request;
    request.set_handle(vdevice_handle);
    request.set_pid(pid);
    // Ship the raw HEF bytes to the service.
    auto hef_memview = hef.pimpl->get_hef_memview();
    request.set_hef(hef_memview.data(), hef_memview.size());

    // Serialize NetworkGroupsParamsMap
    for (const auto &name_params_pair : configure_params) {
        auto proto_net_params = request.add_configure_params_map();
        proto_net_params->set_name(name_params_pair.first);

        auto net_configure_params = name_params_pair.second;
        auto proto_network_configure_params = proto_net_params->mutable_params();
        proto_network_configure_params->set_batch_size(net_configure_params.batch_size);
        proto_network_configure_params->set_power_mode(net_configure_params.power_mode);
        proto_network_configure_params->set_latency(net_configure_params.latency);

        // Init stream params map
        for (const auto &name_stream_params_pair : net_configure_params.stream_params_by_name) {
            auto proto_name_streams_params = proto_network_configure_params->add_stream_params_map();
            proto_name_streams_params->set_name(name_stream_params_pair.first);

            auto proto_stream_params = proto_name_streams_params->mutable_params();
            auto stream_params = name_stream_params_pair.second;
            proto_stream_params->set_stream_interface(stream_params.stream_interface);
            proto_stream_params->set_direction(stream_params.direction);
        }

        // Init network params map
        for (const auto &name_network_params_pair : net_configure_params.network_params_by_name) {
            auto proto_name_network_params = proto_network_configure_params->add_network_params_map();
            proto_name_network_params->set_name(name_network_params_pair.first);

            auto proto_network_params = proto_name_network_params->mutable_params();
            auto network_params = name_network_params_pair.second;
            proto_network_params->set_batch_size(network_params.batch_size);
        }
    }

    VDevice_configure_Reply reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->VDevice_configure(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));

    // One handle per network group contained in the HEF.
    std::vector<uint32_t> networks_handles(reply.networks_handles().begin(), reply.networks_handles().end());
    return networks_handles;
}
-
-Expected<std::vector<std::string>> HailoRtRpcClient::VDevice_get_physical_devices_ids(uint32_t handle)
-{
- VDevice_get_physical_devices_ids_Request request;
- request.set_handle(handle);
-
- VDevice_get_physical_devices_ids_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->VDevice_get_physical_devices_ids(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::vector<std::string> result;
- for (auto &device_id_proto : reply.devices_ids()) {
- result.push_back(device_id_proto);
- }
- return result;
-}
-
-Expected<hailo_stream_interface_t> HailoRtRpcClient::VDevice_get_default_streams_interface(uint32_t handle)
-{
- VDevice_get_default_streams_interface_Request request;
- request.set_handle(handle);
-
- VDevice_get_default_streams_interface_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->VDevice_get_default_streams_interface(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- CHECK_AS_EXPECTED(reply.stream_interface() < HAILO_STREAM_INTERFACE_MAX_ENUM, HAILO_INTERNAL_FAILURE,
- "stream_interface {} out of range", reply.stream_interface());
- return static_cast<hailo_stream_interface_t>(reply.stream_interface());
-}
-
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_release(uint32_t handle)
-{
- Release_Request request;
- request.set_handle(handle);
-
- Release_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_release(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
- return HAILO_SUCCESS;
-}
-
-std::map<std::string, hailo_vstream_params_t> get_group(const ProtoNamedVStreamParamsMap &named_params_map)
-{
- std::map<std::string, hailo_vstream_params_t> result;
- for (auto &named_params : named_params_map.vstream_params_map()) {
- auto name = named_params.name();
- auto proto_params = named_params.params();
- auto proto_user_buffer_format = proto_params.user_buffer_format();
- hailo_format_t user_buffer_format = {
- .type = static_cast<hailo_format_type_t>(proto_user_buffer_format.type()),
- .order = static_cast<hailo_format_order_t>(proto_user_buffer_format.order()),
- .flags = static_cast<hailo_format_flags_t>(proto_user_buffer_format.flags())
- };
- hailo_vstream_params_t params = {
- .user_buffer_format = user_buffer_format,
- .timeout_ms = proto_params.timeout_ms(),
- .queue_size = proto_params.queue_size(),
- .vstream_stats_flags = static_cast<hailo_vstream_stats_flags_t>(proto_params.vstream_stats_flags()),
- .pipeline_elements_stats_flags = static_cast<hailo_pipeline_elem_stats_flags_t>(proto_params.pipeline_elements_stats_flags())
- };
- result.insert({name, params});
- }
- return result;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_input_vstream_params(
- uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- ConfiguredNetworkGroup_make_input_vstream_params_Request request;
- request.set_handle(handle);
- request.set_quantized(quantized);
- request.set_format_type(format_type);
- request.set_timeout_ms(timeout_ms);
- request.set_queue_size(queue_size);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_make_input_vstream_params_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_make_input_vstream_params(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return get_group(reply.vstream_params_map());
-}
-
-Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params_groups(
- uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- ConfiguredNetworkGroup_make_output_vstream_params_groups_Request request;
- request.set_handle(handle);
- request.set_quantized(quantized);
- request.set_format_type(format_type);
- request.set_timeout_ms(timeout_ms);
- request.set_queue_size(queue_size);
-
- ConfiguredNetworkGroup_make_output_vstream_params_groups_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_make_output_vstream_params_groups(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::vector<std::map<std::string, hailo_vstream_params_t>> result;
- for (auto &map_proto : reply.vstream_params_groups()) {
- auto group = get_group(map_proto);
- result.push_back(group);
- }
- return result;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params(
- uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- ConfiguredNetworkGroup_make_output_vstream_params_Request request;
- request.set_handle(handle);
- request.set_quantized(quantized);
- request.set_format_type(format_type);
- request.set_timeout_ms(timeout_ms);
- request.set_queue_size(queue_size);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_make_output_vstream_params_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_make_output_vstream_params(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::map<std::string, hailo_vstream_params_t> result;
- for (int i = 0; i < reply.vstream_params_map().vstream_params_map_size(); ++i) {
- auto name = reply.vstream_params_map().vstream_params_map(i).name();
- auto proto_params = reply.vstream_params_map().vstream_params_map(i).params();
- auto proto_user_buffer_format = proto_params.user_buffer_format();
- hailo_format_t user_buffer_format = {
- .type = static_cast<hailo_format_type_t>(proto_user_buffer_format.type()),
- .order = static_cast<hailo_format_order_t>(proto_user_buffer_format.order()),
- .flags = static_cast<hailo_format_flags_t>(proto_user_buffer_format.flags())
- };
- hailo_vstream_params_t params = {
- .user_buffer_format = user_buffer_format,
- .timeout_ms = proto_params.timeout_ms(),
- .queue_size = proto_params.queue_size(),
- .vstream_stats_flags = static_cast<hailo_vstream_stats_flags_t>(proto_params.vstream_stats_flags()),
- .pipeline_elements_stats_flags = static_cast<hailo_pipeline_elem_stats_flags_t>(proto_params.pipeline_elements_stats_flags())
- };
- result.insert({name, params});
- }
- return result;
-}
-
// Deprecated-style alias: forwards to ConfiguredNetworkGroup_name(), which
// performs the actual RPC.
Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_group_name(uint32_t handle)
{
    return ConfiguredNetworkGroup_name(handle);
}
-
-Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_name(uint32_t handle)
-{
- ConfiguredNetworkGroup_name_Request request;
- request.set_handle(handle);
-
- ConfiguredNetworkGroup_name_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_name(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto network_group_name = reply.network_group_name();
- return network_group_name;
-}
-
-Expected<std::vector<hailo_network_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_infos(uint32_t handle)
-{
- ConfiguredNetworkGroup_get_network_infos_Request request;
- request.set_handle(handle);
-
- ConfiguredNetworkGroup_get_network_infos_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_network_infos(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto network_infos_proto = reply.network_infos();
- std::vector<hailo_network_info_t> network_infos;
- network_infos.reserve(network_infos_proto.size());
- for (auto& info_proto : network_infos_proto) {
- hailo_network_info_t info;
- strcpy(info.name, info_proto.c_str());
- network_infos.push_back(info);
- }
- return network_infos;
-}
-
-Expected<std::vector<hailo_stream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle,
- const std::string &network_name)
-{
- ConfiguredNetworkGroup_get_all_stream_infos_Request request;
- request.set_handle(handle);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_get_all_stream_infos_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_all_stream_infos(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- std::vector<hailo_stream_info_t> result;
- result.reserve(reply.stream_infos().size());
- for (auto proto_stream_info : reply.stream_infos()) {
- hailo_3d_image_shape_t shape{
- .height = proto_stream_info.stream_shape().shape().height(),
- .width = proto_stream_info.stream_shape().shape().width(),
- .features = proto_stream_info.stream_shape().shape().features(),
- };
- hailo_3d_image_shape_t hw_shape{
- .height = proto_stream_info.stream_shape().hw_shape().height(),
- .width = proto_stream_info.stream_shape().hw_shape().width(),
- .features = proto_stream_info.stream_shape().hw_shape().features(),
- };
- hailo_nms_defuse_info_t nms_defuse_info{
- .class_group_index = proto_stream_info.nms_info().defuse_info().class_group_index(),
- .original_name = {0}
- };
- strcpy(nms_defuse_info.original_name, proto_stream_info.nms_info().defuse_info().original_name().c_str());
- hailo_nms_info_t nms_info{
- .number_of_classes = proto_stream_info.nms_info().number_of_classes(),
- .max_bboxes_per_class = proto_stream_info.nms_info().max_bboxes_per_class(),
- .bbox_size = proto_stream_info.nms_info().bbox_size(),
- .chunks_per_frame = proto_stream_info.nms_info().chunks_per_frame(),
- .is_defused = proto_stream_info.nms_info().is_defused(),
- .defuse_info = nms_defuse_info,
- };
- hailo_format_t format{
- .type = static_cast<hailo_format_type_t>(proto_stream_info.format().type()),
- .order = static_cast<hailo_format_order_t>(proto_stream_info.format().order()),
- .flags = static_cast<hailo_format_flags_t>(proto_stream_info.format().flags())
- };
- hailo_quant_info_t quant_info{
- .qp_zp = proto_stream_info.quant_info().qp_zp(),
- .qp_scale = proto_stream_info.quant_info().qp_scale(),
- .limvals_min = proto_stream_info.quant_info().limvals_min(),
- .limvals_max = proto_stream_info.quant_info().limvals_max()
- };
- hailo_stream_info_t stream_info;
- if (format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
- stream_info.nms_info = nms_info;
- } else {
- stream_info.shape = shape;
- stream_info.hw_shape = hw_shape;
- }
- stream_info.hw_data_bytes = proto_stream_info.hw_data_bytes();
- stream_info.hw_frame_size = proto_stream_info.hw_frame_size();
- stream_info.format = format;
- stream_info.direction = static_cast<hailo_stream_direction_t>(proto_stream_info.direction());
- stream_info.index = static_cast<uint8_t>(proto_stream_info.index());
- strcpy(stream_info.name, proto_stream_info.name().c_str());
- stream_info.quant_info = quant_info;
- stream_info.is_mux = proto_stream_info.is_mux();
- result.push_back(stream_info);
- }
- return result;
-}
-
-Expected<hailo_stream_interface_t> HailoRtRpcClient::ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle)
-{
- ConfiguredNetworkGroup_get_default_stream_interface_Request request;
- request.set_handle(handle);
-
- ConfiguredNetworkGroup_get_default_stream_interface_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_default_stream_interface(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto stream_interface = static_cast<hailo_stream_interface_t>(reply.stream_interface());
- return stream_interface;
-}
-
-Expected<std::vector<std::vector<std::string>>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle)
-{
- ConfiguredNetworkGroup_get_output_vstream_groups_Request request;
- request.set_handle(handle);
-
- ConfiguredNetworkGroup_get_output_vstream_groups_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_output_vstream_groups(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto vstream_groups_proto = reply.output_vstream_groups();
- std::vector<std::vector<std::string>> result;
- result.reserve(vstream_groups_proto.size());
- for (auto& vstream_group_proto : vstream_groups_proto) {
- std::vector<std::string> group;
- group.reserve(vstream_group_proto.vstream_group().size());
- for (auto& name : vstream_group_proto.vstream_group()) {
- group.push_back(name);
- }
- result.push_back(group);
- }
- return result;
-}
-
-hailo_vstream_info_t deserialize_vstream_info(const ProtoVStreamInfo &info_proto)
-{
- hailo_vstream_info_t info;
- strcpy(info.name, info_proto.name().c_str());
- strcpy(info.network_name, info_proto.network_name().c_str());
- info.direction = static_cast<hailo_stream_direction_t>(info_proto.direction());
- hailo_format_t format = {
- .type = static_cast<hailo_format_type_t>(info_proto.format().type()),
- .order = static_cast<hailo_format_order_t>(info_proto.format().order()),
- .flags = static_cast<hailo_format_flags_t>(info_proto.format().flags())
- };
- info.format = format;
- if (format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
- hailo_nms_shape_t nms_shape = {
- .number_of_classes = info_proto.nms_shape().number_of_classes(),
- .max_bboxes_per_class = info_proto.nms_shape().max_bbox_per_class()
- };
- info.nms_shape = nms_shape;
- } else {
- hailo_3d_image_shape_t shape = {
- .height = info_proto.shape().height(),
- .width = info_proto.shape().width(),
- .features = info_proto.shape().features()
- };
- info.shape = shape;
- }
- hailo_quant_info_t quant_info = {
- .qp_zp = info_proto.quant_info().qp_zp(),
- .qp_scale = info_proto.quant_info().qp_scale(),
- .limvals_min = info_proto.quant_info().limvals_min(),
- .limvals_max = info_proto.quant_info().limvals_max()
- };
- info.quant_info = quant_info;
- return info;
-}
-
-Expected<std::vector<hailo_vstream_info_t>> deserialize_vstream_infos(const ConfiguredNetworkGroup_get_vstream_infos_Reply &reply)
-{
- std::vector<hailo_vstream_info_t> result;
- result.reserve(reply.vstream_infos().size());
- for (auto& info_proto : reply.vstream_infos()) {
- auto info = deserialize_vstream_info(info_proto);
- result.push_back(info);
- }
- return result;
-}
-
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle,
- std::string network_name)
-{
- ConfiguredNetworkGroup_get_vstream_infos_Request request;
- request.set_handle(handle);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_input_vstream_infos(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return deserialize_vstream_infos(reply);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle,
- std::string network_name)
-{
- ConfiguredNetworkGroup_get_vstream_infos_Request request;
- request.set_handle(handle);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_output_vstream_infos(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return deserialize_vstream_infos(reply);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle,
- std::string network_name)
-{
- ConfiguredNetworkGroup_get_vstream_infos_Request request;
- request.set_handle(handle);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_all_vstream_infos(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return deserialize_vstream_infos(reply);
-}
-
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle,
- const std::chrono::milliseconds &timeout, const std::string &network_name)
-{
- ConfiguredNetworkGroup_set_scheduler_timeout_Request request;
- request.set_handle(handle);
- request.set_timeout_ms(static_cast<uint32_t>(timeout.count()));
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_set_scheduler_timeout_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_set_scheduler_timeout(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold,
- const std::string &network_name)
-{
- ConfiguredNetworkGroup_set_scheduler_threshold_Request request;
- request.set_handle(handle);
- request.set_threshold(threshold);
- request.set_network_name(network_name);
-
- ConfiguredNetworkGroup_set_scheduler_threshold_Reply reply;
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_set_scheduler_threshold(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-Expected<LatencyMeasurementResult> HailoRtRpcClient::ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle,
- const std::string &network_name)
-{
- ConfiguredNetworkGroup_get_latency_measurement_Request request;
- ConfiguredNetworkGroup_get_latency_measurement_Reply reply;
- request.set_handle(handle);
- request.set_network_name(network_name);
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_get_latency_measurement(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- LatencyMeasurementResult result{
- .avg_hw_latency = std::chrono::nanoseconds(reply.avg_hw_latency())
- };
- return result;
-}
-
-Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_multi_context(uint32_t handle)
-{
- ConfiguredNetworkGroup_is_multi_context_Request request;
- ConfiguredNetworkGroup_is_multi_context_Reply reply;
- request.set_handle(handle);
- grpc::ClientContext context;
- grpc::Status status = m_stub->ConfiguredNetworkGroup_is_multi_context(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return reply.is_multi_context();
-}
-
// Fetches the ConfigureNetworkParams this network group was configured with,
// rebuilding the nested stream-params and network-params maps from the proto reply.
Expected<ConfigureNetworkParams> HailoRtRpcClient::ConfiguredNetworkGroup_get_config_params(uint32_t handle)
{
    ConfiguredNetworkGroup_get_config_params_Request request;
    ConfiguredNetworkGroup_get_config_params_Reply reply;
    request.set_handle(handle);
    grpc::ClientContext context;
    grpc::Status status = m_stub->ConfiguredNetworkGroup_get_config_params(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
    auto proto_configure_params = reply.params();
    ConfigureNetworkParams network_configure_params;
    network_configure_params.batch_size = static_cast<uint16_t>(proto_configure_params.batch_size());
    network_configure_params.power_mode = static_cast<hailo_power_mode_t>(proto_configure_params.power_mode());
    network_configure_params.latency = static_cast<hailo_latency_measurement_flags_t>(proto_configure_params.latency());
    // Rebuild the per-stream params map. The union member of
    // hailo_stream_parameters_t is chosen by the stream direction; only the
    // reserved pcie params are filled here (nothing else crosses the RPC).
    for (auto &proto_name_streams_params_pair : proto_configure_params.stream_params_map()) {
        auto proto_streams_params = proto_name_streams_params_pair.params();
        auto stream_direction = static_cast<hailo_stream_direction_t>(proto_streams_params.direction());
        hailo_stream_parameters_t stream_params;
        if (stream_direction == HAILO_H2D_STREAM) {
            // Host-to-device stream: initialize the input side of the union.
            stream_params = {
                .stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface()),
                .direction = stream_direction,
                {.pcie_input_params = {
                    .reserved = 0
                }}
            };
        } else {
            // Device-to-host stream: initialize the output side of the union.
            stream_params = {
                .stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface()),
                .direction = stream_direction,
                {.pcie_output_params = {
                    .reserved = 0
                }}
            };
        }
        network_configure_params.stream_params_by_name.insert({proto_name_streams_params_pair.name(), stream_params});
    }
    // Rebuild the per-network params map (currently only the batch size).
    for (auto &proto_name_network_params_pair : proto_configure_params.network_params_map()) {
        auto proto_network_params = proto_name_network_params_pair.params();
        hailo_network_parameters_t net_params {
            .batch_size = static_cast<uint16_t>(proto_network_params.batch_size())
        };

        network_configure_params.network_params_by_name.insert({proto_name_network_params_pair.name(), net_params});
    }
    return network_configure_params;
}
-
-hailo_status HailoRtRpcClient::InputVStream_write(uint32_t handle, const MemoryView &buffer)
-{
- InputVStream_write_Request request;
- request.set_handle(handle);
- request.set_data(buffer.data(), buffer.size());
- grpc::ClientContext context;
- InputVStream_write_Reply reply;
- grpc::Status status = m_stub->InputVStream_write(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- if (reply.status() == HAILO_STREAM_ABORTED_BY_USER) {
- return static_cast<hailo_status>(reply.status());
- }
- CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
- return HAILO_SUCCESS;
-}
-
-hailo_status HailoRtRpcClient::OutputVStream_read(uint32_t handle, MemoryView buffer)
-{
- OutputVStream_read_Request request;
- request.set_handle(handle);
- request.set_size(static_cast<uint32_t>(buffer.size()));
- grpc::ClientContext context;
- OutputVStream_read_Reply reply;
- grpc::Status status = m_stub->OutputVStream_read(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- if (reply.status() == HAILO_STREAM_ABORTED_BY_USER) {
- return static_cast<hailo_status>(reply.status());
- }
- CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
- memcpy(buffer.data(), reply.data().data(), buffer.size());
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> HailoRtRpcClient::InputVStream_get_frame_size(uint32_t handle)
-{
- VStream_get_frame_size_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_frame_size_Reply reply;
- grpc::Status status = m_stub->InputVStream_get_frame_size(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return reply.frame_size();
-}
-
-Expected<size_t> HailoRtRpcClient::OutputVStream_get_frame_size(uint32_t handle)
-{
- VStream_get_frame_size_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_frame_size_Reply reply;
- grpc::Status status = m_stub->OutputVStream_get_frame_size(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- return reply.frame_size();
-}
-
-hailo_status HailoRtRpcClient::InputVStream_flush(uint32_t handle)
-{
- InputVStream_flush_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- InputVStream_flush_Reply reply;
- grpc::Status status = m_stub->InputVStream_flush(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-Expected<std::string> HailoRtRpcClient::InputVStream_name(uint32_t handle)
-{
- VStream_name_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_name_Reply reply;
- grpc::Status status = m_stub->InputVStream_name(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto name = reply.name();
- return name;
-}
-
-Expected<std::string> HailoRtRpcClient::OutputVStream_name(uint32_t handle)
-{
- VStream_name_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_name_Reply reply;
- grpc::Status status = m_stub->OutputVStream_name(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto name = reply.name();
- return name;
-}
-
-Expected<std::string> HailoRtRpcClient::InputVStream_network_name(uint32_t handle)
-{
- VStream_network_name_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_network_name_Reply reply;
- grpc::Status status = m_stub->InputVStream_network_name(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto name = reply.network_name();
- return name;
-}
-
-Expected<std::string> HailoRtRpcClient::OutputVStream_network_name(uint32_t handle)
-{
- VStream_network_name_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_network_name_Reply reply;
- grpc::Status status = m_stub->OutputVStream_network_name(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto name = reply.network_name();
- return name;
-}
-
-hailo_status HailoRtRpcClient::InputVStream_abort(uint32_t handle)
-{
- VStream_abort_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_abort_Reply reply;
- grpc::Status status = m_stub->InputVStream_abort(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-hailo_status HailoRtRpcClient::OutputVStream_abort(uint32_t handle)
-{
- VStream_abort_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_abort_Reply reply;
- grpc::Status status = m_stub->OutputVStream_abort(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-hailo_status HailoRtRpcClient::InputVStream_resume(uint32_t handle)
-{
- VStream_resume_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_resume_Reply reply;
- grpc::Status status = m_stub->InputVStream_resume(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-hailo_status HailoRtRpcClient::OutputVStream_resume(uint32_t handle)
-{
- VStream_resume_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_resume_Reply reply;
- grpc::Status status = m_stub->OutputVStream_resume(&context, request, &reply);
- CHECK_GRPC_STATUS(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- return static_cast<hailo_status>(reply.status());
-}
-
-Expected<hailo_format_t> HailoRtRpcClient::InputVStream_get_user_buffer_format(uint32_t handle)
-{
- VStream_get_user_buffer_format_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_user_buffer_format_Reply reply;
- grpc::Status status = m_stub->InputVStream_get_user_buffer_format(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
-
- auto user_buffer_format_proto = reply.user_buffer_format();
- hailo_format_t format{
- .type = static_cast<hailo_format_type_t>(user_buffer_format_proto.type()),
- .order = static_cast<hailo_format_order_t>(user_buffer_format_proto.order()),
- .flags = static_cast<hailo_format_flags_t>(user_buffer_format_proto.flags())
- };
-
- return format;
-}
-
-Expected<hailo_format_t> HailoRtRpcClient::OutputVStream_get_user_buffer_format(uint32_t handle)
-{
- VStream_get_user_buffer_format_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_user_buffer_format_Reply reply;
- grpc::Status status = m_stub->OutputVStream_get_user_buffer_format(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
-
- auto user_buffer_format_proto = reply.user_buffer_format();
- hailo_format_t format{
- .type = static_cast<hailo_format_type_t>(user_buffer_format_proto.type()),
- .order = static_cast<hailo_format_order_t>(user_buffer_format_proto.order()),
- .flags = static_cast<hailo_format_flags_t>(user_buffer_format_proto.flags())
- };
-
- return format;
-}
-
-Expected<hailo_vstream_info_t> HailoRtRpcClient::InputVStream_get_info(uint32_t handle)
-{
- VStream_get_info_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_info_Reply reply;
- grpc::Status status = m_stub->InputVStream_get_info(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto info_proto = reply.vstream_info();
- return deserialize_vstream_info(info_proto);
-}
-Expected<hailo_vstream_info_t> HailoRtRpcClient::OutputVStream_get_info(uint32_t handle)
-{
- VStream_get_info_Request request;
- request.set_handle(handle);
- grpc::ClientContext context;
- VStream_get_info_Reply reply;
- grpc::Status status = m_stub->OutputVStream_get_info(&context, request, &reply);
- CHECK_GRPC_STATUS_AS_EXPECTED(status);
- assert(reply.status() < HAILO_STATUS_COUNT);
- CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
- auto info_proto = reply.vstream_info();
- return deserialize_vstream_info(info_proto);
-}
-
-}
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_rpc_client.hpp
- * @brief TODO
- **/
-
-#ifndef HAILO_HAILORT_RPC_CLIENT_HPP_
-#define HAILO_HAILORT_RPC_CLIENT_HPP_
-
-#include "hailo/expected.hpp"
-#include "hailo/hailort.hpp"
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable: 4244 4267 4127)
-#else
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-#include <grpcpp/grpcpp.h>
-#include "hailort_rpc.grpc.pb.h"
-#if defined(_MSC_VER)
-#pragma warning( pop )
-#else
-#pragma GCC diagnostic pop
-#endif
-#include <memory>
-
-namespace hailort
-{
-
-class HailoRtRpcClient final {
-public:
- HailoRtRpcClient(std::shared_ptr<grpc::Channel> channel)
- : m_stub(ProtoHailoRtRpc::NewStub(channel)) {}
-
- hailo_status client_keep_alive(uint32_t process_id);
- Expected<hailo_version_t> get_service_version();
-
- Expected<uint32_t> VDevice_create(const hailo_vdevice_params_t ¶ms, uint32_t pid);
- hailo_status VDevice_release(uint32_t handle);
- Expected<std::vector<std::string>> VDevice_get_physical_devices_ids(uint32_t handle);
- Expected<hailo_stream_interface_t> VDevice_get_default_streams_interface(uint32_t handle);
- Expected<std::vector<uint32_t>> VDevice_configure(uint32_t vdevice_handle, const Hef &hef, uint32_t pid, const NetworkGroupsParamsMap &configure_params={});
- hailo_status ConfiguredNetworkGroup_release(uint32_t handle);
- Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_input_vstream_params(uint32_t handle,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name);
- Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_output_vstream_params(uint32_t handle,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name);
- Expected<std::string> ConfiguredNetworkGroup_get_network_group_name(uint32_t handle);
- Expected<std::string> ConfiguredNetworkGroup_name(uint32_t handle);
- Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroup_get_network_infos(uint32_t handle);
- Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle, const std::string &network_name);
- Expected<hailo_stream_interface_t> ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle);
- Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroup_make_output_vstream_params_groups(uint32_t handle,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
- Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle);
- Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle, std::string network_name);
- Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle, std::string network_name);
- Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle, std::string network_name);
- hailo_status ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle, const std::chrono::milliseconds &timeout,
- const std::string &network_name);
- hailo_status ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold, const std::string &network_name);
- Expected<LatencyMeasurementResult> ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle, const std::string &network_name);
- Expected<bool> ConfiguredNetworkGroup_is_multi_context(uint32_t handle);
- Expected<ConfigureNetworkParams> ConfiguredNetworkGroup_get_config_params(uint32_t handle);
-
- Expected<std::vector<uint32_t>> InputVStreams_create(uint32_t net_group_handle,
- const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid);
- hailo_status InputVStream_release(uint32_t handle);
- Expected<std::vector<uint32_t>> OutputVStreams_create(uint32_t net_group_handle,
- const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid);
- hailo_status OutputVStream_release(uint32_t handle);
- hailo_status InputVStream_write(uint32_t handle, const MemoryView &buffer);
- hailo_status OutputVStream_read(uint32_t handle, MemoryView buffer);
- Expected<size_t> InputVStream_get_frame_size(uint32_t handle);
- Expected<size_t> OutputVStream_get_frame_size(uint32_t handle);
-
- hailo_status InputVStream_flush(uint32_t handle);
-
- Expected<std::string> InputVStream_name(uint32_t handle);
- Expected<std::string> OutputVStream_name(uint32_t handle);
-
- Expected<std::string> InputVStream_network_name(uint32_t handle);
- Expected<std::string> OutputVStream_network_name(uint32_t handle);
-
- hailo_status InputVStream_abort(uint32_t handle);
- hailo_status OutputVStream_abort(uint32_t handle);
- hailo_status InputVStream_resume(uint32_t handle);
- hailo_status OutputVStream_resume(uint32_t handle);
-
- Expected<hailo_format_t> InputVStream_get_user_buffer_format(uint32_t handle);
- Expected<hailo_format_t> OutputVStream_get_user_buffer_format(uint32_t handle);
-
- Expected<hailo_vstream_info_t> InputVStream_get_info(uint32_t handle);
- Expected<hailo_vstream_info_t> OutputVStream_get_info(uint32_t handle);
-
-private:
- std::unique_ptr<ProtoHailoRtRpc::Stub> m_stub;
-};
-
-}
-
-#endif // HAILO_HAILORT_CLIENT_RPC_HPP_
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hef.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "hailo/hailort.h"
-#include "hailo/hef.hpp"
-#include "hef_internal.hpp"
-#include "hailo/stream.hpp"
-#include "hailo/device.hpp"
-#include "common/utils.hpp"
-#include "hailo/hailort_common.hpp"
-#include "hailort_defaults.hpp"
-#include "common/string_utils.hpp"
-
-#include "pcie_device.hpp"
-#include "context_switch/multi_context/vdma_config_manager.hpp"
-#include "context_switch/single_context/hcp_config_network_group.hpp"
-#include "byte_order.h"
-#include "common/logger_macros.hpp"
-#include "common/file_utils.hpp"
-#include "layer_info.hpp"
-#include "control.hpp"
-#include "context_switch_defs.h"
-
-#include <fstream>
-#include <memory>
-#include <limits>
-#include <stdint.h>
-#include <stdbool.h>
-#include <set>
-#include <algorithm>
-#include <cstring>
-#include <numeric>
-
-namespace hailort
-{
-
-#define HEF__MD5_BUFFER_SIZE (1024)
-#define DEFAULT_BATCH_SIZE (1)
-
-static const uint8_t ENABLE_LCU_CONTROL_WORD[4] = {1, 0, 0, 0};
-
-#pragma pack(push, 1)
-typedef struct {
- uint32_t words_count;
- uint32_t address;
-} CcwHeader;
-#pragma pack(pop)
-
-bool ConfigureNetworkParams::operator==(const ConfigureNetworkParams &other) const
-{
- for (auto &name_param_pair : network_params_by_name) {
- if ((other.network_params_by_name.find(name_param_pair.first) == other.network_params_by_name.end()) ||
- (name_param_pair.second.batch_size != other.network_params_by_name.at(name_param_pair.first).batch_size) ) {
- return false;
- }
- }
- return (batch_size == other.batch_size) && (power_mode == other.power_mode) && (latency == other.latency);
-}
-
-bool ConfigureNetworkParams::operator!=(const ConfigureNetworkParams &other) const
-{
- return !(*this == other);
-}
-
-
-// Note: Can't add the definition in the header. This will lead to the following error:
-// /usr/include/c++/7/bits/unique_ptr.h: In instantiation of 'void std::default_delete<_Tp>::operator()(_Tp*) const [with _Tp = Hef::Impl]':
-// /usr/include/c++/7/bits/unique_ptr.h:263:17: required from 'std::unique_ptr<_Tp, _Dp>::~unique_ptr() [with _Tp = Hef::Impl; _Dp = std::default_delete<Hef::Impl>]'
-// /local/users/projects/platform-sw/hailort/libhailort/src/../include/hailo/hef.hpp:61:7: required from 'Expected<T>::~Expected() [with T = Hef]'
-// /local/users/projects/platform-sw/hailort/hailortcli/run_command.cpp:705:51: required from here
-// /usr/include/c++/7/bits/unique_ptr.h:76:22: error: invalid application of 'sizeof' to incomplete type 'Hef::Impl'
-// static_assert(sizeof(_Tp)>0,
-Hef::~Hef() = default;
-Hef::Hef(Hef &&) = default;
-Hef &Hef::operator=(Hef &&) = default;
-
-Expected<Hef> Hef::create(const std::string &hef_path)
-{
- auto impl = Hef::Impl::create(hef_path);
- CHECK_EXPECTED(impl);
-
- // TODO: can we do this without the copy ctor here (i.e. make the impl as a unique_ptr to begin with)
- return Hef(make_unique_nothrow<Impl>(impl.release()));
-}
-
-Expected<Hef> Hef::create(const MemoryView &hef_buffer)
-{
- auto impl = Hef::Impl::create(hef_buffer);
- CHECK_EXPECTED(impl);
-
- // TODO: can we do this without the copy ctor here (i.e. make the impl as a unique_ptr to begin with)
- return Hef(make_unique_nothrow<Impl>(impl.release()));
-}
-
-Hef::Hef(std::unique_ptr<Impl> pimpl) :
- pimpl(std::move(pimpl))
-{}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::get_input_stream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_input_stream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::get_output_stream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_output_stream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::get_all_stream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_all_stream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<hailo_network_info_t>> Hef::get_network_infos(const std::string &net_group_name)
-{
- auto names_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(names_pair);
- return pimpl->get_network_infos(names_pair->first);
-}
-
-Expected<hailo_stream_info_t> Hef::get_stream_info_by_name(const std::string &stream_name,
- hailo_stream_direction_t stream_direction, const std::string &net_group_name)
-{
- // Addressing the situation where net_group_name == ""
- auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(net_group_name_pair);
- auto net_group_name_str = net_group_name_pair->first;
-
- return pimpl->get_stream_info_by_name(stream_name, stream_direction, net_group_name_str);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::get_input_vstream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_input_vstream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::get_output_vstream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_output_vstream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::get_all_vstream_infos(const std::string &name)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->get_all_vstream_infos(network_pair.value().first, network_pair.value().second);
-}
-
-Expected<std::vector<std::string>> Hef::get_sorted_output_names(const std::string &net_group_name)
-{
- // Addressing the situation where net_group_name == ""
- auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(net_group_name_pair);
- auto net_group_name_str = net_group_name_pair->first;
-
- return pimpl->get_sorted_output_names(net_group_name_str);
-}
-
-Expected<size_t> Hef::get_number_of_input_streams(const std::string &net_group_name)
-{
- // Addressing the situation where net_group_name == ""
- auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(net_group_name_pair);
- auto net_group_name_str = net_group_name_pair->first;
-
- return pimpl->get_number_of_input_streams(net_group_name_str);
-}
-
-Expected<size_t> Hef::get_number_of_output_streams(const std::string &net_group_name)
-{
- // Addressing the situation where net_group_name == ""
- auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(net_group_name_pair);
- auto net_group_name_str = net_group_name_pair->first;
-
- return pimpl->get_number_of_output_streams(net_group_name_str);
-}
-
-Expected<float64_t> Hef::get_bottleneck_fps(const std::string &net_group_name)
-{
- return pimpl->get_bottleneck_fps(net_group_name);
-}
-
-Expected<std::string> Hef::get_vstream_name_from_original_name(const std::string &original_name,
- const std::string &net_group_name)
-{
- return pimpl->get_vstream_name_from_original_name(original_name, net_group_name);
-}
-
-Expected<std::vector<std::string>> Hef::get_original_names_from_vstream_name(const std::string &stream_name,
- const std::string &net_group_name)
-{
- return pimpl->get_original_names_from_vstream_name(stream_name, net_group_name);
-}
-
-Expected<std::vector<std::string>> Hef::get_stream_names_from_vstream_name(const std::string &vstream_name,
- const std::string &net_group_name)
-{
- auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(network_group_name_pair);
- auto net_group_name_str = network_group_name_pair->first;
-
- return pimpl->get_stream_names_from_vstream_name(vstream_name, net_group_name_str);
-}
-
-Expected<std::vector<std::string>> Hef::get_vstream_names_from_stream_name(const std::string &stream_name,
- const std::string &net_group_name)
-{
- auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(network_group_name_pair);
- auto net_group_name_str = network_group_name_pair->first;
-
- return pimpl->get_vstream_names_from_stream_name(stream_name, net_group_name_str);
-}
-
-Expected<Hef::Impl> Hef::Impl::create(const std::string &hef_path)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- Impl hef(hef_path, status);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed creating HEF");
- return make_unexpected(status);
- }
-
- return hef;
-}
-
-Expected<Hef::Impl> Hef::Impl::create(const MemoryView &hef_buffer)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- Impl hef(hef_buffer, status);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed creating HEF");
- return make_unexpected(status);
- }
-
- return hef;
-}
-
-static hailo_status calc_istream_md5(std::ifstream &s, MD5_SUM_t &calculated_md5)
-{
- char md5_buffer[HEF__MD5_BUFFER_SIZE] = {};
- MD5_CTX md5 = {};
-
- auto beg_pos = s.tellg();
- CHECK(-1 != beg_pos, HAILO_FILE_OPERATION_FAILURE, "ifstream::tellg() failed");
-
- MD5_Init(&md5);
- while (!s.eof()) {
- s.read(md5_buffer, HEF__MD5_BUFFER_SIZE);
- CHECK(!s.bad(), HAILO_FILE_OPERATION_FAILURE, "ifstream::read() failed");
- MD5_Update(&md5, &md5_buffer, static_cast<size_t>(s.gcount()));
- }
- MD5_Final(calculated_md5, &md5);
-
- s.clear();
- s.seekg(beg_pos, s.beg);
- CHECK(s.good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Hef::Impl::validate_hef_header(const hef__header_t &header, MD5_SUM_t &calculated_md5, size_t proto_size)
-{
- CHECK(HEADER_MAGIC == BYTE_ORDER__htonl(header.magic), HAILO_INVALID_HEF,
- "HEF magic does not match. detected magic - {:x}", header.magic);
-
- CHECK(HEADER_VERSION == BYTE_ORDER__htonl(header.version), HAILO_INVALID_HEF, "HEF version does not match");
-
- CHECK(proto_size == BYTE_ORDER__htonl(header.hef_proto_length), HAILO_INVALID_HEF,
- "HEF file length does not match");
-
- CHECK(0 == memcmp(&calculated_md5, &header.expected_md5, sizeof(MD5_SUM_t)), HAILO_INVALID_HEF,
- "HEF md5 does not match");
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Hef::Impl::validate_hef_extensions()
-{
- std::vector<std::string> unsupported_extensions;
- for (const auto &extension : m_hef_extensions) {
- if ((extension.type_index() >= m_supported_extensions_bitset.size()) || !m_supported_extensions_bitset.test(extension.type_index())) {
- unsupported_extensions.emplace_back(extension.name());
- }
- }
-
- CHECK(unsupported_extensions.empty(), HAILO_INVALID_HEF, "Failed opening non-compatible HEF with the following unsupported extensions: {}",
- std::accumulate(std::next(unsupported_extensions.begin()), unsupported_extensions.end(), unsupported_extensions[0],
- [] (std::string a, std::string b) { return std::move(a) + ", " + b; }));
-
- return HAILO_SUCCESS;
-}
-
-void Hef::Impl::init_md5(MD5_SUM_t &calculated_md5)
-{
- memcpy(m_md5, calculated_md5, sizeof(m_md5));
-}
-
-hailo_status Hef::Impl::parse_hef_file(const std::string &hef_path)
-{
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
- auto hef_buffer = read_binary_file(hef_path);
- CHECK_EXPECTED_AS_STATUS(hef_buffer);
- m_hef_buffer = hef_buffer.release();
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
- auto hef_file = std::ifstream(hef_path, std::ios::in | std::ios::binary);
- CHECK(hef_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed to open HEF file \"{}\". errno: {}", hef_path, errno);
-
- hef__header_t header = {};
- hef_file.read((char*)&header, sizeof(header));
- CHECK(hef_file.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading HEF header");
-
- auto proto_size = get_istream_size(hef_file);
- CHECK_EXPECTED_AS_STATUS(proto_size);
-
- MD5_SUM_t calculated_md5 = {};
- auto status = calc_istream_md5(hef_file, calculated_md5);
- CHECK_SUCCESS(status);
-
- status = validate_hef_header(header, calculated_md5, proto_size.value());
- CHECK_SUCCESS(status);
-
- init_md5(calculated_md5);
-
- ProtoHEFHef hef_message;
- auto rb = hef_message.ParseFromIstream(&hef_file);
- CHECK(rb, HAILO_INVALID_HEF, "Failed parsing HEF file");
- status = transfer_protobuf_field_ownership(hef_message);
- CHECK_SUCCESS(status);
-
- fill_core_ops();
-
- status = fill_networks_metadata();
- CHECK_SUCCESS(status);
-
- // Must be called after fill_networks_metadata
- status = validate_hef_extensions();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Hef::Impl::parse_hef_memview(const MemoryView &hef_memview)
-{
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
- auto hef_buffer = Buffer::create(hef_memview.data(), hef_memview.size());
- CHECK_EXPECTED_AS_STATUS(hef_buffer);
- m_hef_buffer = hef_buffer.release();
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
- CHECK(hef_memview.size() >= sizeof(hef__header_t), HAILO_INVALID_HEF, "Invalid HEF header");
- const hef__header_t &header = reinterpret_cast<const hef__header_t&>(*hef_memview.data());
-
- auto proto_buffer = (hef_memview.data() + sizeof(header));
- auto proto_size = (hef_memview.size() - sizeof(header));
-
- MD5_CTX md5 = {};
- MD5_SUM_t calculated_md5 = {};
- MD5_Init(&md5);
- MD5_Update(&md5, proto_buffer, proto_size);
- MD5_Final(calculated_md5, &md5);
-
- auto status = validate_hef_header(header, calculated_md5, proto_size);
- CHECK_SUCCESS(status);
-
- init_md5(calculated_md5);
-
- ProtoHEFHef hef_message;
- auto rb = hef_message.ParseFromArray(proto_buffer, static_cast<int>(proto_size));
- CHECK(rb, HAILO_INVALID_HEF, "Failed parsing HEF buffer");
- status = transfer_protobuf_field_ownership(hef_message);
- CHECK_SUCCESS(status);
-
- fill_core_ops();
-
- status = fill_networks_metadata();
- CHECK_SUCCESS(status);
-
- // Must be called after fill_networks_metadata
- status = validate_hef_extensions();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Hef::Impl::fill_networks_metadata()
-{
- fill_extensions_bitset();
-
- NetworkGroupMetadataPerArch metadata;
- uint32_t partial_clusters_layout_bitmap = 0;
-
- for (auto &network_group : m_groups) {
- auto network_group_name = HefUtils::get_network_group_name(*network_group, m_supported_features);
- // TODO: keep metadata per core_op (HRT-8639)
- const auto &core_ops = m_core_ops_per_group[network_group_name];
- assert(core_ops.size() == 1);
- const auto &core_op = core_ops[0];
- if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
- if (m_supported_features.hailo_net_flow) {
- for (auto &partial_core_op : core_op.partial_core_ops) {
- partial_clusters_layout_bitmap = partial_core_op->layout.partial_clusters_layout_bitmap();
- auto metadata_per_arch = create_metadata_per_arch(*(partial_core_op->core_op));
- CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
- auto &&arch_metadata = metadata_per_arch.release();
- auto expected_net_flow_ops = create_network_group_ops(*network_group, arch_metadata);
- CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
- m_post_process_ops_per_group.insert({arch_metadata.network_group_name(), expected_net_flow_ops.value()});
- metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
- }
- } else {
- for (auto &partial_network_group : network_group->partial_network_groups()) {
- partial_clusters_layout_bitmap = partial_network_group.layout().partial_clusters_layout_bitmap();
- ProtoHEFCoreOpMock partial_core_op{
- partial_network_group.network_group().network_group_metadata(),
- partial_network_group.network_group().preliminary_config(),
- partial_network_group.network_group().contexts(),
- partial_network_group.network_group().sorted_outputs_order(),
- partial_network_group.network_group().fused_layers_metadata(),
- partial_network_group.network_group().networks_names(),
- {}
- };
- auto metadata_per_arch = create_metadata_per_arch(partial_core_op);
- CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
- auto &&arch_metadata = metadata_per_arch.release();
- std::vector<std::shared_ptr<hailort::NetFlowElement>> empty_ops;
- m_post_process_ops_per_group.insert({arch_metadata.network_group_name(), empty_ops});
- metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
- }
- }
- } else {
- partial_clusters_layout_bitmap = PARTIAL_CLUSTERS_LAYOUT_IGNORE;
- auto metadata_per_arch = create_metadata_per_arch(core_op);
- CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
- auto &&arch_metadata = metadata_per_arch.release();
- auto expected_net_flow_ops = create_network_group_ops(*network_group, arch_metadata);
- CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
- m_post_process_ops_per_group.insert({arch_metadata.network_group_name(), expected_net_flow_ops.value()});
- metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
- }
- CHECK(!contains(m_network_group_metadata_per_arch, network_group_name),
- HAILO_INVALID_OPERATION, "Network group with the name {} is already configured on the device", network_group_name);
- m_network_group_metadata_per_arch.emplace(network_group_name, metadata);
- }
- return HAILO_SUCCESS;
-}
-
-static Expected<std::vector<ConfigChannelInfo>> parse_config_channels_info(const ProtoHEFCoreOpMock &core_op)
-{
- const auto &metadata = core_op.network_group_metadata;
- // Backwards compatibility for HEFs without the cfg_channels_count field
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(metadata.cfg_channels_count()),
- HAILO_INVALID_HEF, "Invalid cfg channels count");
- const uint8_t cfg_channels_count = (0 == metadata.cfg_channels_count()) ?
- 1 : static_cast<uint8_t>(metadata.cfg_channels_count());
-
-
- std::vector<ConfigChannelInfo> config_channels_info;
- config_channels_info.reserve(cfg_channels_count);
- const auto &cfg_channels_config = metadata.cfg_channels_config();
- for (uint8_t config_stream_index = 0; config_stream_index < cfg_channels_count; config_stream_index++) {
- auto cfg_info = std::find_if(cfg_channels_config.begin(), cfg_channels_config.end(),
- [config_stream_index](const auto &cfg_info)
- {
- return cfg_info.cfg_channel_index() == config_stream_index;
- });
-
- if (cfg_info != cfg_channels_config.end()) {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(cfg_info->engine_id()), HAILO_INVALID_HEF, "Invalid dma engine index");
- config_channels_info.emplace_back(ConfigChannelInfo{static_cast<uint8_t>(cfg_info->engine_id())});
- }
- else {
- // Not found - can happen on old HEF or hailo8. In those case we want to use the default engine
- config_channels_info.emplace_back(ConfigChannelInfo{vdma::DEFAULT_ENGINE_INDEX});
- }
- }
-
- return config_channels_info;
-}
-
-Expected<NetworkGroupMetadata> Hef::Impl::create_metadata_per_arch(const ProtoHEFCoreOpMock &core_op)
-{
- auto preliminary_context = HefUtils::parse_preliminary_context(core_op.preliminary_config, m_supported_features);
- CHECK_EXPECTED(preliminary_context);
-
- auto dynamic_contexts = HefUtils::parse_dynamic_contexts(core_op, m_supported_features);
- CHECK_EXPECTED(dynamic_contexts);
-
- auto config_channels_info = parse_config_channels_info(core_op);
- CHECK_EXPECTED(config_channels_info);
-
- auto sorted_output_names = HefUtils::get_sorted_output_names(core_op);
- CHECK_EXPECTED(sorted_output_names);
-
- std::vector<std::string> sorted_network_names;
- if (m_supported_features.multi_network_support) {
- sorted_network_names.reserve(core_op.networks_names.size());
- for (auto &partial_network_name : core_op.networks_names) {
- auto network_name = HefUtils::get_network_name(core_op, partial_network_name);
- sorted_network_names.push_back(network_name);
- }
- } else {
- sorted_network_names.push_back(HailoRTDefaults::get_network_name(core_op.network_group_metadata.network_group_name()));
- }
-
- NetworkGroupMetadata metadata_per_arch(core_op.network_group_metadata.network_group_name(),
- preliminary_context.release(), dynamic_contexts.release(), config_channels_info.release(),
- sorted_output_names.release(), m_supported_features, sorted_network_names);
- return metadata_per_arch;
-}
-
-void Hef::Impl::fill_core_ops()
-{
- if (m_supported_features.hailo_net_flow) {
- for (const auto &net_group : m_groups) {
- auto core_op_iter = std::find_if(net_group->ops().begin(), net_group->ops().end(),
- [](auto &op) {
- return op.op_case() == ProtoHEFOp::kCoreOp;
- });
- assert(core_op_iter != m_groups[0]->ops().end());
- std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
- partial_core_ops.reserve(core_op_iter->core_op().partial_core_ops().size());
- for (auto &partial_core_op : core_op_iter->core_op().partial_core_ops()) {
- ProtoHEFCoreOpMock core_op{
- partial_core_op.core_op().network_group_metadata(),
- partial_core_op.core_op().preliminary_config(),
- partial_core_op.core_op().contexts(),
- partial_core_op.core_op().sorted_outputs_order(),
- partial_core_op.core_op().fused_layers_metadata(),
- partial_core_op.core_op().networks_names(),
- {}
- };
- ProtoHEFPartialCoreOpMock partial_core_op_mock{
- std::make_shared<ProtoHEFCoreOpMock>(core_op),
- partial_core_op.layout()
- };
- partial_core_ops.push_back(std::make_shared<ProtoHEFPartialCoreOpMock>(partial_core_op_mock));
- }
- ProtoHEFCoreOpMock core_op{
- core_op_iter->core_op().network_group_metadata(),
- core_op_iter->core_op().preliminary_config(),
- core_op_iter->core_op().contexts(),
- core_op_iter->core_op().sorted_outputs_order(),
- core_op_iter->core_op().fused_layers_metadata(),
- core_op_iter->core_op().networks_names(),
- partial_core_ops
- };
- auto net_group_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
- m_core_ops_per_group[net_group_name].push_back(std::move(core_op));
- }
- } else {
- for (const auto &net_group : m_groups) {
- std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
- partial_core_ops.reserve(net_group->partial_network_groups().size());
- for (auto &partial_network_group : net_group->partial_network_groups()) {
- ProtoHEFCoreOpMock core_op{
- partial_network_group.network_group().network_group_metadata(),
- partial_network_group.network_group().preliminary_config(),
- partial_network_group.network_group().contexts(),
- partial_network_group.network_group().sorted_outputs_order(),
- partial_network_group.network_group().fused_layers_metadata(),
- partial_network_group.network_group().networks_names(),
- {}
- };
- ProtoHEFPartialCoreOpMock partial_core_op{
- std::make_shared<ProtoHEFCoreOpMock>(core_op),
- partial_network_group.layout()
- };
- partial_core_ops.push_back(std::make_shared<ProtoHEFPartialCoreOpMock>(partial_core_op));
- }
- ProtoHEFCoreOpMock core_op{
- net_group->network_group_metadata(),
- net_group->preliminary_config(),
- net_group->contexts(),
- net_group->sorted_outputs_order(),
- net_group->fused_layers_metadata(),
- net_group->networks_names(),
- partial_core_ops
- };
- auto net_group_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
- m_core_ops_per_group[net_group_name].push_back(std::move(core_op));
- }
- }
-}
-
-hailo_status Hef::Impl::transfer_protobuf_field_ownership(ProtoHEFHef &hef_message)
-{
- m_groups.reserve(hef_message.network_groups().size());
- while (!hef_message.network_groups().empty()) {
- // We pass the ownership from protobuf to shared_ptr (it'll call delete when the refcount drops to 0)
- // Note: Protobuf messages are allocated with new
- const auto network_group = hef_message.mutable_network_groups()->ReleaseLast();
- CHECK(nullptr != network_group, HAILO_INTERNAL_FAILURE, "Null network group found while parsing HEF; Unexpected");
- m_groups.emplace_back(network_group);
- }
-
- m_hef_extensions.reserve(hef_message.extensions().size());
- for (const auto &extension : hef_message.extensions()) {
- m_hef_extensions.emplace_back(extension);
- }
-
- m_header.CopyFrom(hef_message.header());
- m_included_features.CopyFrom(hef_message.included_features());
-
- m_hef_optional_extensions.reserve(hef_message.optional_extensions().size());
- for (const auto &optional_extension : hef_message.optional_extensions()) {
- m_hef_optional_extensions.emplace_back(optional_extension);
- }
-
- m_supported_features = get_supported_features(m_header, m_hef_extensions, m_included_features,
- m_hef_optional_extensions);
-
- return HAILO_SUCCESS;
-}
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-const MemoryView Hef::Impl::get_hef_memview()
-{
- return MemoryView(m_hef_buffer);
-}
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-Hef::Impl::Impl(const std::string &hef_path, hailo_status &status)
-{
- status = HAILO_UNINITIALIZED;
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- status = parse_hef_file(hef_path);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed parsing HEF file");
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-Hef::Impl::Impl(const MemoryView &hef_memview, hailo_status &status)
-{
- status = HAILO_UNINITIALIZED;
- GOOGLE_PROTOBUF_VERIFY_VERSION;
-
- status = parse_hef_memview(hef_memview);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed parsing HEF buffer");
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-void Hef::Impl::fill_extensions_bitset()
-{
- for (auto extension : SUPPORTED_EXTENSIONS) {
- m_supported_extensions_bitset[extension] = 1;
- }
-}
-
-SupportedFeatures Hef::Impl::get_supported_features(const ProtoHEFHeader &header,
- const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features,
- const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions)
-{
- SupportedFeatures supported_features{};
- supported_features.padded_ddr_buffers = check_hef_extension(ProtoHEFExtensionType::PADDED_DDR_BUFFERS,
- header, hef_extensions, included_features);
- supported_features.multi_network_support = check_hef_optional_extension(ProtoHEFExtensionType::MULTI_NETWORK_VARIABLE_BATCH_SIZE,
- header, hef_optional_extensions);
- supported_features.multi_context = check_hef_extension(ProtoHEFExtensionType::IS_MULTI_CONTEXTS,
- header, hef_extensions, included_features);
- supported_features.preliminary_run_asap = check_hef_extension(ProtoHEFExtensionType::KO_RUN_ASAP,
- header, hef_extensions, included_features);
- supported_features.hailo_net_flow = check_hef_extension(ProtoHEFExtensionType::HAILO_NET_FLOW,
- header, hef_extensions, included_features);
-
- return supported_features;
-}
-
-Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_network_group_ops(const ProtoHEFNetworkGroup &network_group_proto,
- NetworkGroupMetadata &network_group_meta_data) const
-{
- std::vector<std::shared_ptr<NetFlowElement>> result;
- if (!m_supported_features.hailo_net_flow) {
- return result;
- }
- auto output_layer_infos = network_group_meta_data.get_output_layer_infos();
- std::map<size_t, LayerInfo> pad_index_to_streams_info;
- for (auto &output_layer_info : output_layer_infos) {
- if (output_layer_info.pad_index != INVALID_PAD_INDEX) {
- pad_index_to_streams_info.insert({output_layer_info.pad_index, output_layer_info});
- }
- }
- std::map<size_t, size_t> input_to_output_pads;
- for (auto &pad_edge : network_group_proto.pad_edges()) {
- input_to_output_pads.insert({pad_edge.dst(), pad_edge.src()});
- }
- for (auto &op_proto : network_group_proto.ops()) {
- switch (op_proto.op_case()) {
- case ProtoHEFOp::kCoreOp: {
- break;
- }
- case ProtoHEFOp::kNmsOp: {
- NetFlowYoloNmsElement nms_op{};
- nms_op.type = NetFlowElement::Type::YoloNmsOp;
- nms_op.name = "YOLO_NMS";
- nms_op.nms_score_th = (float32_t)op_proto.nms_op().nms_score_th();
- nms_op.nms_iou_th = (float32_t)op_proto.nms_op().nms_iou_th();
- nms_op.max_proposals_per_class = op_proto.nms_op().max_proposals_per_class();
- nms_op.classes = op_proto.nms_op().classes();
- nms_op.background_removal = op_proto.nms_op().background_removal();
- nms_op.background_removal_index = op_proto.nms_op().background_removal_index();
- nms_op.image_height = (float32_t)op_proto.nms_op().yolo_nms_op().image_height();
- nms_op.image_width = (float32_t)op_proto.nms_op().yolo_nms_op().image_width();
- nms_op.input_division_factor = op_proto.nms_op().yolo_nms_op().input_division_factor();
- if (!nms_op.input_division_factor) {
- nms_op.input_division_factor = 1;
- }
- nms_op.bbox_decoders.reserve(op_proto.nms_op().yolo_nms_op().bbox_decoders().size());
- for (auto &bbox_proto : op_proto.nms_op().yolo_nms_op().bbox_decoders()) {
- YoloBboxDecoder yolo_bbox_decoder;
- for (auto h : bbox_proto.h()) {
- yolo_bbox_decoder.h.push_back(h);
- }
- for (auto w : bbox_proto.w()) {
- yolo_bbox_decoder.w.push_back(w);
- }
- yolo_bbox_decoder.stride = bbox_proto.stride();
- yolo_bbox_decoder.stream_name = pad_index_to_streams_info[input_to_output_pads[bbox_proto.pad_index()]].name;
- nms_op.bbox_decoders.push_back(yolo_bbox_decoder);
- }
- std::set<uint32_t> input_pads;
- std::transform(op_proto.input_pads().begin(), op_proto.input_pads().end(), std::inserter(input_pads, input_pads.begin()),
- [](auto &pad) {
- return pad.index();
- });
- for (auto &input_pad : op_proto.input_pads()) {
- CHECK_AS_EXPECTED(input_to_output_pads.count(input_pad.index()), HAILO_INVALID_HEF,
- "NMS op is not connected to core op");
- auto output_pad_index = input_to_output_pads[input_pad.index()];
- CHECK_AS_EXPECTED(pad_index_to_streams_info.count(output_pad_index), HAILO_INVALID_HEF,
- "Pad {} of post-process {} is not connected to any core output stream",
- input_pad.index(), op_proto.name());
- const auto &op_input_stream = pad_index_to_streams_info[output_pad_index];
- nms_op.input_pads.push_back(NetFlowPad{input_pad.name(), op_input_stream.format, op_input_stream.quant_info, 0});
- nms_op.input_streams.insert(op_input_stream.name);
- }
- hailo_format_t format;
- format.type = HAILO_FORMAT_TYPE_FLOAT32;
- format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
- format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
- assert(op_proto.output_pads().size() == 1);
- auto proto_output_pad = op_proto.output_pads()[0];
- nms_op.output_pads.push_back(NetFlowPad{proto_output_pad.name(), format, hailo_quant_info_t(), nms_op.classes});
- result.push_back(std::shared_ptr<NetFlowElement>(std::make_shared<NetFlowYoloNmsElement>(nms_op)));
-
- // Fill meta-data output vstream info
- auto net_group_name = HefUtils::get_network_group_name(network_group_proto, m_supported_features);
- auto network_name = HailoRTDefaults::get_network_name(net_group_name);
- hailo_vstream_info_t net_flow_output_vstream_info{};
- strncpy(net_flow_output_vstream_info.name, proto_output_pad.name().c_str(), proto_output_pad.name().length() + 1);
- strncpy(net_flow_output_vstream_info.network_name, network_name.c_str(), network_name.length() + 1);
- net_flow_output_vstream_info.direction = HAILO_D2H_STREAM;
- net_flow_output_vstream_info.format = format;
- net_flow_output_vstream_info.nms_shape.number_of_classes = nms_op.classes;
- net_flow_output_vstream_info.nms_shape.max_bboxes_per_class = nms_op.max_proposals_per_class;
- network_group_meta_data.add_output_vstream_info(net_flow_output_vstream_info);
- break;
- }
- default: {
- LOGGER__ERROR("Unsupported Net-Flow Op");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
- }
- }
- return result;
-}
-
-hailo_status get_hw_padding_params(hailo_format_order_t format_order, uint32_t width, uint32_t features, uint32_t hw_data_bytes,
- uint16_t &feature_padding_payload, uint16_t &periph_bytes_per_buffer)
-{
- uint32_t feature_padding_payload_32bit = 0;
- uint32_t periph_bytes_per_buffer_32bit = 0;
-
- // TODO: HRT-3278 dont assume core_buffers_per_frame == height
- switch (format_order)
- {
- case HAILO_FORMAT_ORDER_NHCW:
- case HAILO_FORMAT_ORDER_NHW:
- feature_padding_payload_32bit = width * hw_data_bytes;
- periph_bytes_per_buffer_32bit = feature_padding_payload_32bit * features;
- break;
- case HAILO_FORMAT_ORDER_NHWC:
- case HAILO_FORMAT_ORDER_FCR:
- case HAILO_FORMAT_ORDER_F8CR:
- case HAILO_FORMAT_ORDER_NC:
- case HAILO_FORMAT_ORDER_BAYER_RGB:
- case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
- case HAILO_FORMAT_ORDER_RGB888:
- feature_padding_payload_32bit = features * hw_data_bytes;
- periph_bytes_per_buffer_32bit = feature_padding_payload_32bit * width;
- break;
- default:
- LOGGER__ERROR("unsupported format for HW padding");
- return HAILO_INTERNAL_FAILURE;
- }
-
- CHECK(IS_FIT_IN_UINT16(feature_padding_payload_32bit), HAILO_INVALID_HEF,
- "frame width {} is too big", feature_padding_payload_32bit);
- CHECK(IS_FIT_IN_UINT16(periph_bytes_per_buffer_32bit), HAILO_INVALID_HEF,
- "unpadded bytes per buffer {} is too big", periph_bytes_per_buffer_32bit);
-
- feature_padding_payload = static_cast<uint16_t>(feature_padding_payload_32bit);
- periph_bytes_per_buffer = static_cast<uint16_t>(periph_bytes_per_buffer_32bit);
-
- return HAILO_SUCCESS;
-}
-
-Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(hailo_format_order_t format_order, uint32_t width, uint32_t features,
- uint32_t hw_data_bytes, uint16_t core_buffers_per_frame, uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr)
-{
- CONTROL_PROTOCOL__nn_stream_config_t stream_config = {};
-
- stream_config.core_buffers_per_frame = core_buffers_per_frame;
- stream_config.core_bytes_per_buffer = core_bytes_per_buffer;
- stream_config.periph_buffers_per_frame = core_buffers_per_frame; // periph buffers per frame is the same (even if
- // for hw padding each buffer is smaller).
-
-
- /* For DDR buffering - core buffers is depended on the amount of buffers per PCIe interrupt. No HW padding required */
- if (is_ddr) {
- stream_config.core_buffers_per_frame = 1;
- stream_config.feature_padding_payload = 0;
- stream_config.periph_bytes_per_buffer = stream_config.core_bytes_per_buffer;
- } else {
- if (hw_padding_supported) {
- auto status = get_hw_padding_params(format_order, width, features, hw_data_bytes,
- stream_config.feature_padding_payload, stream_config.periph_bytes_per_buffer);
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else {
- stream_config.feature_padding_payload = 0;
- stream_config.periph_bytes_per_buffer = stream_config.core_bytes_per_buffer;
- }
- /* For now, no support for buffer padding */
- stream_config.buffer_padding_payload = 0;
- stream_config.buffer_padding = 0;
- }
- return stream_config;
-}
-
-Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(const ProtoHEFEdgeLayerBase &edge_layer,
- bool hw_padding_supported, const ProtoHEFEdgeConnectionType &edge_connection_type)
-{
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(edge_layer.core_bytes_per_buffer()), HAILO_INVALID_HEF,
- "core_bytes_per_buffer is too big");
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(edge_layer.core_buffers_per_frame()), HAILO_INVALID_HEF,
- "core_buffers_per_frame is too big");
-
- auto format_order_exp = HailoRTDefaults::get_device_format_order(edge_layer.format());
- CHECK_EXPECTED(format_order_exp);
- auto format_order = format_order_exp.release();
- auto is_ddr = ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR == edge_connection_type;
-
- // Width and features only used in case hw_padding is supported. In that case, they represent the HW shape (without padding)
- return parse_nn_stream_config(format_order, edge_layer.width(), edge_layer.features(),
- edge_layer.data_bytes(), static_cast<uint16_t>(edge_layer.core_buffers_per_frame()),
- static_cast<uint16_t>(edge_layer.core_bytes_per_buffer()), hw_padding_supported, is_ddr);
-}
-
-Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(const LayerInfo &edge_layer, bool hw_padding_supported)
-{
- // TODO HRT-7177 - pass interface to layer info instead of re-calculated Layer info from stream_internal.hpp
- // After passing stream interface, there is no need for this function. Just use CONTROL_PROTOCOL__nn_stream_config_t from layer info.
- auto is_ddr = false; // This function is called only on boundary layers, so no DDR
- return parse_nn_stream_config(edge_layer.format.order, edge_layer.hw_shape.width, edge_layer.hw_shape.features,
- edge_layer.hw_data_bytes, edge_layer.nn_stream_config.core_buffers_per_frame,
- edge_layer.nn_stream_config.core_bytes_per_buffer, hw_padding_supported, is_ddr);
-}
-
-bool HefConfigurator::is_hw_padding_supported(bool is_boundary, bool is_mux, hailo_format_order_t format_order,
- uint16_t core_buffers_per_frame, uint32_t height, uint32_t width, uint32_t features, uint32_t hw_data_bytes)
-{
- if (!is_boundary || is_mux) {
- return false;
- }
-
- // TODO: HRT-4462 support more orders
- switch (format_order)
- {
- case HAILO_FORMAT_ORDER_NHCW:
- break;
- default:
- LOGGER__DEBUG("HW padding is not supported for format {} ", format_order);
- return false;
- }
-
- if (core_buffers_per_frame != height) {
- // TODO: HRT-3278
- LOGGER__DEBUG("HW padding is supported only on layers with core_buffers_per_frame == height");
- return false;
- }
-
- if (((width * features) % 8) != 0) {
- // TODO: HRT-963 support chunks
- LOGGER__DEBUG("HW padding is supported only when periph_bytes_per_buffer is a multiple of 8");
- return false;
- }
-
- if ((width * features * hw_data_bytes) >
- (HAILO8_INBOUND_DATA_STREAM_SIZE - 1)) {
- // TODO: HRT-4177
- LOGGER__DEBUG("HW padding is supported only on layers with features * width * data size > stream size");
- return false;
- }
- return true;
-}
-
-bool HefConfigurator::is_hw_padding_supported(const LayerInfo &layer_info)
-{
- /* If the network is transposed, the width and height are swapped in LayerInfo c'tor, so need to swap it again for calculations */
- auto height = layer_info.shape.height;
- auto width = layer_info.shape.width;
- if (layer_info.format.flags & HAILO_FORMAT_FLAGS_TRANSPOSED) {
- std::swap(height, width);
- }
-
- auto is_boundary = true; // This function is called only on boundary layers
- return is_hw_padding_supported(is_boundary, layer_info.is_mux, layer_info.format.order,
- layer_info.nn_stream_config.core_buffers_per_frame, height, width,
- layer_info.shape.features, layer_info.hw_data_bytes);
-}
-
-bool HefConfigurator::is_hw_padding_supported(const ProtoHEFEdgeLayer &edge_layer)
-{
- auto is_boundary = (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY == edge_layer.context_switch_info().edge_connection_type());
- auto is_mux = (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == edge_layer.edge_layer_type());
- auto edge_layer_base = edge_layer.layer_info().edge_layer_base();
- auto format_order_exp = HailoRTDefaults::get_device_format_order(edge_layer_base.format());
- if (!format_order_exp) {
- LOGGER__DEBUG("Failed to get format order. Not enabling hw padding");
- return false;
- }
-
- if (!IS_FIT_IN_UINT16(edge_layer_base.core_buffers_per_frame())) {
- LOGGER__DEBUG("Invalid core_buffers_per_frame. Not enabling hw padding");
- return false;
- }
-
- auto format_order = format_order_exp.release();
- return is_hw_padding_supported(is_boundary, is_mux, format_order, static_cast<uint16_t>(edge_layer_base.core_buffers_per_frame()),
- edge_layer_base.height(), edge_layer_base.width(), edge_layer_base.features(), edge_layer_base.data_bytes());
-}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_input_stream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_input_stream_infos(network_name);
-}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_output_stream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_output_stream_infos(network_name);
-}
-
-Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_all_stream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_all_stream_infos(network_name);
-}
-
-Expected<std::vector<hailo_network_info_t>> Hef::Impl::get_network_infos(const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_network_infos();
-}
-
-Expected<hailo_stream_info_t> Hef::Impl::get_stream_info_by_name(const std::string &stream_name,
- hailo_stream_direction_t stream_direction, const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- if (HAILO_H2D_STREAM == stream_direction) {
- auto stream_infos = network_group_metadata->get_input_stream_infos();
- CHECK_EXPECTED(stream_infos);
- for (auto &stream_info : stream_infos.value()) {
- if (stream_name == stream_info.name) {
- return std::move(stream_info);
- }
- }
- } else {
- auto stream_infos = network_group_metadata->get_output_stream_infos();
- CHECK_EXPECTED(stream_infos);
- for (auto &stream_info : stream_infos.value()) {
- if (stream_name == stream_info.name) {
- return std::move(stream_info);
- }
- }
- }
-
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_input_vstream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_input_vstream_infos(network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_output_vstream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_output_vstream_infos(network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_all_vstream_infos(const std::string &net_group_name,
- const std::string &network_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
- return network_group_metadata->get_all_vstream_infos(network_name);
-}
-
-const std::vector<ProtoHEFNetworkGroupPtr>& Hef::Impl::network_groups() const
-{
- return m_groups;
-};
-
-const std::vector<ProtoHEFCoreOpMock>& Hef::Impl::core_ops(const std::string &net_group_name) const
-{
- assert(contains(m_core_ops_per_group, net_group_name));
- return m_core_ops_per_group.at(net_group_name);
-};
-
-const std::vector<std::shared_ptr<hailort::NetFlowElement>> Hef::Impl::post_process_ops(const std::string &net_group_name) const
-{
- assert(contains(m_post_process_ops_per_group, net_group_name));
- return m_post_process_ops_per_group.at(net_group_name);
-}
-
-bool Hef::Impl::check_hef_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
- const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features)
-{
- if (header.version() > 0) {
- return std::find_if(hef_extensions.begin(), hef_extensions.end(),
- [extension] (const ProtoHEFExtension &extended_feature) { return ((ProtoHEFExtensionType)extended_feature.type_index()) == extension; }) != hef_extensions.end();
- }
-
- // ProtoHEFIncludedFeature is deprecated
- switch (extension) {
- case ProtoHEFExtensionType::ABBALE:
- return included_features.abbale();
- case ProtoHEFExtensionType::POSTED_WRITES:
- return included_features.posted_writes();
- case ProtoHEFExtensionType::DDR:
- return included_features.ddr();
- case ProtoHEFExtensionType::IS_MULTI_CONTEXTS:
- return included_features.is_multi_context();
- case ProtoHEFExtensionType::COMPRESSED_PARAMS:
- return included_features.compressed_params();
- case ProtoHEFExtensionType::TRANSPOSE_COMPONENT:
- return included_features.transpose_component();
- case ProtoHEFExtensionType::PADDED_DDR_BUFFERS:
- return included_features.padded_ddr_buffers();
- default:
- return false;
- }
-}
-
-bool Hef::Impl::check_hef_optional_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
- const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions)
-{
- if (header.version() > 0) {
- return std::find_if(hef_optional_extensions.begin(), hef_optional_extensions.end(),
- [extension] (const ProtoHEFOptionalExtension &extended_feature) { return ((ProtoHEFExtensionType)extended_feature.type_index()) == extension; }) != hef_optional_extensions.end();
- }
-
- /* optional extensions are only for m_header.version() > 0.
- For lower version, those features are not supported */
- return false;
-}
-
-Expected<std::pair<std::string, std::string>> Hef::Impl::get_network_group_and_network_name(const std::string &name)
-{
- std::string network_group_name;
- if (name.empty()) {
- // Name is not given - addressing all networks in the first network_group
- network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
- m_groups[0]->partial_network_groups(0).network_group().network_group_metadata().network_group_name()
- : m_groups[0]->network_group_metadata().network_group_name();
- LOGGER__INFO("No name was given. Addressing all networks of default network_group: {}",
- network_group_name);
- auto network_name = HailoRTDefaults::get_network_name(network_group_name);
- return std::make_pair(network_group_name, network_name);
- } else {
- const ProtoHEFNetworkGroup *network_group_ptr = nullptr;
- for (const auto &network_group : m_groups) {
- // TODO: Handle new HEFs
- network_group_ptr = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
- &network_group->partial_network_groups(0).network_group()
- : network_group.get();
- network_group_name = network_group_ptr->network_group_metadata().network_group_name();
-
- // Look for network_group with the given name
- if (name == network_group_name) {
- auto network_name = HailoRTDefaults::get_network_name(network_group_name);
- return std::make_pair(network_group_name, network_name);
- }
- // Look for network with the given name
- for (const auto &partial_network_name : network_group_ptr->networks_names()) {
- auto full_network_name = HefUtils::get_network_name(network_group_name, partial_network_name);
- if (name == full_network_name) {
- return std::make_pair(network_group_name, full_network_name);
- }
- }
- // Handle case of deafult_network_name
- if (name == HailoRTDefaults::get_network_name(network_group_name)) {
- return std::make_pair(network_group_name, name);
- }
- }
- }
-
- LOGGER__ERROR("Failed to find network or network_group with the name {}",
- name);
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-// TODO: core_ops names?
-Expected<std::shared_ptr<ProtoHEFCoreOpMock>> Hef::Impl::get_core_op_by_net_group_name(const std::string &net_group_name)
-{
- if ("" == net_group_name) {
- auto network_group_ptr = m_groups[0];
- auto network_group_name = HefUtils::get_network_group_name(*network_group_ptr, m_supported_features);
- LOGGER__INFO("No network_group name was given. Addressing default network_group: {}", network_group_name);
- const auto &core_op = m_core_ops_per_group[network_group_name][0];
- if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
- auto partial_core_op = core_op.partial_core_ops[0];
- return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
- }
- return std::make_shared<ProtoHEFCoreOpMock>(core_op);
- }
- CHECK_AS_EXPECTED(contains(m_core_ops_per_group, net_group_name), HAILO_NOT_FOUND,
- "HEF does not contain network_group with name {}", net_group_name);
- const auto &core_op = m_core_ops_per_group[net_group_name][0];
- if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
- auto partial_core_op = core_op.partial_core_ops[0];
- return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
- }
- return std::make_shared<ProtoHEFCoreOpMock>(core_op);
-}
-
-Expected<size_t> Hef::Impl::get_number_of_input_streams(const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- auto input_layer_infos = network_group_metadata->get_input_layer_infos();
- return input_layer_infos.size();
-}
-
-Expected<size_t> Hef::Impl::get_number_of_output_streams(const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- auto output_layer_infos = network_group_metadata->get_output_layer_infos();
- return output_layer_infos.size();
-}
-
-static Expected<LayerType> get_layer_type(const ProtoHEFEdgeConnectionType &edge_connection_type)
-{
- switch (edge_connection_type) {
- case PROTO__EDGE_CONNECTION_TYPE__BOUNDARY:
- return LayerType::BOUNDARY;
- case PROTO__EDGE_CONNECTION_TYPE__INTERMEDIATE:
- return LayerType::INTER_CONTEXT;
- case PROTO__EDGE_CONNECTION_TYPE__DDR:
- return LayerType::DDR;
- default:
- LOGGER__ERROR("Not supported edge connection type {}", edge_connection_type);
- return make_unexpected(HAILO_INVALID_HEF);
- }
-}
-
-hailo_status HefUtils::fill_layer_info_with_base_info(const ProtoHEFEdgeLayerBase &base_info,
- const ProtoHEFEdgeConnectionType &edge_connection_type, const ProtoHEFNetworkGroupMetadata &network_group_proto,
- bool hw_padding_supported, bool transposed, const uint8_t context_index, const uint8_t network_index,
- LayerInfo &layer_info)
-{
- auto format_order_exp = HailoRTDefaults::get_device_format_order(base_info.format());
- CHECK_EXPECTED_AS_STATUS(format_order_exp);
-
- auto format_oder = format_order_exp.release();
-
- auto layer_type = get_layer_type(edge_connection_type);
- CHECK_EXPECTED_AS_STATUS(layer_type);
- layer_info.type = layer_type.value();
-
- if (HEF__FORMAT__NMS != base_info.format()) {
- layer_info.shape.height = base_info.height();
- layer_info.shape.width = base_info.width();
- layer_info.shape.features = base_info.features();
- } else {
- layer_info.shape.height = static_cast<uint32_t>(base_info.additional_info().nms_info().number_of_classes());
- layer_info.shape.width = HailoRTCommon::BBOX_PARAMS;
- layer_info.shape.features = static_cast<uint32_t>(base_info.additional_info().nms_info().max_output_size() *
- base_info.additional_info().nms_info().input_division_factor());
- }
- if (hw_padding_supported) {
- layer_info.hw_shape.height = base_info.height();
- layer_info.hw_shape.width = base_info.width();
- layer_info.hw_shape.features = base_info.features();
- }
- else {
- layer_info.hw_shape.height = base_info.padded_height();
- layer_info.hw_shape.width = base_info.padded_width();
- layer_info.hw_shape.features = base_info.padded_features();
- }
- layer_info.hw_data_bytes = base_info.data_bytes();
-
- // TODO: remove duplications with stream info parse
- layer_info.format.order = format_oder;
- layer_info.format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
-
- // The check network_group_proto.transposed_net() is for supporting backward compatability for old hefs
- if ((network_group_proto.transposed_net() || transposed) && (layer_info.format.order != HAILO_FORMAT_ORDER_NC)) {
- std::swap(layer_info.shape.height, layer_info.shape.width);
- layer_info.format.flags |= HAILO_FORMAT_FLAGS_TRANSPOSED;
- }
-
- if (base_info.host_argmax()) {
- layer_info.format.flags |= HAILO_FORMAT_FLAGS_HOST_ARGMAX;
- layer_info.shape.features = 1;
- }
-
- auto type = HailoRTCommon::get_format_type(layer_info.hw_data_bytes);
- CHECK_EXPECTED_AS_STATUS(type);
- layer_info.format.type = type.value();
-
- auto nn_stream_config = HefConfigurator::parse_nn_stream_config(base_info, hw_padding_supported,
- edge_connection_type);
- CHECK_EXPECTED_AS_STATUS(nn_stream_config, "Failed parse nn stream config");
- layer_info.nn_stream_config = nn_stream_config.release();
- layer_info.network_index = network_index;
- layer_info.context_index = context_index;
-
- CHECK(IS_FIT_IN_UINT8(base_info.sys_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid sys_index: {}.", base_info.sys_index());
- layer_info.stream_index = static_cast<uint8_t>(base_info.sys_index());
- CHECK(IS_FIT_IN_UINT8(base_info.engine_id()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid engine_id: {}.", base_info.engine_id());
- layer_info.dma_engine_index = static_cast<uint8_t>(base_info.engine_id());
-
- if (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) {
- auto expected_nms_info = parse_proto_nms_info(base_info.additional_info().nms_info());
- CHECK_EXPECTED_AS_STATUS(expected_nms_info);
- layer_info.nms_info = expected_nms_info.release();
- }
-
- layer_info.max_shmifo_size = base_info.max_shmifo_size();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_layer_info(const ProtoHEFEdgeLayerInfo &info,
- const ProtoHEFEdgeConnectionType &edge_connection_type,
- const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
- bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
- uint8_t network_index, LayerInfo &layer_info)
-{
- auto status = fill_layer_info_with_base_info(info.edge_layer_base(), edge_connection_type, core_op.network_group_metadata,
- hw_padding_supported, info.transposed(), context_index, network_index, layer_info);
- CHECK_SUCCESS(status);
-
- if (HAILO_MAX_STREAM_NAME_SIZE < (info.name().length() + 1)) {
- LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.name());
- return HAILO_INTERNAL_FAILURE;
- }
- if (HAILO_MAX_NETWORK_NAME_SIZE < (partial_network_name.length() + 1)) {
- LOGGER__ERROR("The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", partial_network_name);
- return HAILO_INTERNAL_FAILURE;
- }
- layer_info.name = info.name();
-
- layer_info.network_name = HefUtils::get_network_name(core_op, partial_network_name);
- layer_info.is_mux = false;
- layer_info.direction = direction;
- layer_info.quant_info.limvals_max = info.numeric_info().limvals_max();
- layer_info.quant_info.limvals_min = info.numeric_info().limvals_min();
- layer_info.quant_info.qp_scale = info.numeric_info().qp_scale();
- layer_info.quant_info.qp_zp = info.numeric_info().qp_zp();
- // Simulation info
- assert (1 == info.edge_layer_base().buffer_indices_size());
- layer_info.buffer_indices.cluster_index = info.edge_layer_base().buffer_indices(0).cluster_index();
- layer_info.buffer_indices.index = info.edge_layer_base().buffer_indices(0).index();
-
- layer_info.is_defused_nms = core_op.fused_layers_metadata.network_has_fused_layers() &&
- (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) && layer_info.nms_info.is_defused;
-
- if (layer_info.is_defused_nms) {
- for (const auto &fused_layer : core_op.fused_layers_metadata.fused_layers()) {
- if (fused_layer.layer_info().name() == layer_info.nms_info.defuse_info.original_name) {
- // This creates a new LayerInfo for the fused layer *for each defused layer*, even though they all share the same fused layer.
- // TODO Make it so all defused layer reference the same LayerInfo of the fused layer.
- LayerInfo fused_layer_info = {};
- status = fill_fused_nms_info(fused_layer, fused_layer_info, layer_info.quant_info, layer_info.network_name);
- CHECK_SUCCESS(status);
- layer_info.fused_nms_layer.push_back(fused_layer_info);
- break;
- }
- }
- CHECK(0 != layer_info.fused_nms_layer.size(), HAILO_NOT_FOUND, "Could not find the fused layer {}", layer_info.nms_info.defuse_info.original_name);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_fused_nms_info(const ProtoHEFEdgeLayerFused &info, LayerInfo &layer_info,
- hailo_quant_info_t &defuse_quant_info, const std::string &network_name)
-{
- auto base_info = info.layer_info().edge_layer_base();
- auto format_order_exp = HailoRTDefaults::get_device_format_order(base_info.format());
- CHECK_EXPECTED_AS_STATUS(format_order_exp);
- layer_info.format.order = format_order_exp.release();
- layer_info.format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
-
- layer_info.shape.height = static_cast<uint32_t>(info.nms_info().number_of_classes());
- layer_info.shape.width = HailoRTCommon::BBOX_PARAMS;
- layer_info.shape.features = static_cast<uint32_t>(info.nms_info().max_output_size() *
- info.nms_info().input_division_factor());
-
- layer_info.hw_data_bytes = base_info.data_bytes();
-
- auto type = HailoRTCommon::get_format_type(layer_info.hw_data_bytes);
- CHECK_EXPECTED_AS_STATUS(type);
- layer_info.format.type = type.value();
-
- auto expected_nms_info = parse_proto_nms_info(info.nms_info());
- CHECK_EXPECTED_AS_STATUS(expected_nms_info);
- layer_info.nms_info = expected_nms_info.release();
-
- if (HAILO_MAX_STREAM_NAME_SIZE < (info.layer_info().name().length() + 1)) {
- LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.layer_info().name());
- return HAILO_INTERNAL_FAILURE;
- }
- layer_info.name = info.layer_info().name();
- layer_info.network_name = network_name;
- layer_info.is_mux = false;
- layer_info.direction = HAILO_D2H_STREAM;
- // Due to bug in SDK quant info of fused layer is empty, so we use the quant info of the defused layer
- layer_info.quant_info = defuse_quant_info;
-
- // Simulation info
- assert (1 == info.layer_info().edge_layer_base().buffer_indices_size());
- layer_info.buffer_indices.cluster_index = info.layer_info().edge_layer_base().buffer_indices(0).cluster_index();
- layer_info.buffer_indices.index = info.layer_info().edge_layer_base().buffer_indices(0).index();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_mux_info(const ProtoHEFEdgeLayerMux &info,
- const ProtoHEFEdgeConnectionType &edge_connection_type,
- const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
- bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
- uint8_t network_index, LayerInfo &layer_info)
-{
- const bool transposed = false;
- auto status = fill_layer_info_with_base_info(info.edge_layer_base(), edge_connection_type, core_op.network_group_metadata,
- hw_padding_supported, transposed, context_index, network_index, layer_info);
- CHECK_SUCCESS(status);
-
- if (HAILO_MAX_STREAM_NAME_SIZE < (info.name().length() + 1)) {
- LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.name());
- return HAILO_INTERNAL_FAILURE;
- }
- if (HAILO_MAX_NETWORK_NAME_SIZE < (partial_network_name.length() + 1)) {
- LOGGER__ERROR("The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", partial_network_name);
- return HAILO_INTERNAL_FAILURE;
- }
- layer_info.name = info.name();
-
- layer_info.network_name = HefUtils::get_network_name(core_op, partial_network_name);
- layer_info.is_mux = true;
- layer_info.predecessor.reserve(info.mux_data().number_of_predecessors());
- layer_info.height_gcd = info.mux_data().height_gcd();
- layer_info.height_ratios.reserve(info.mux_data().height_ratios_list_len());
- for (const auto &height_ratio : info.mux_data().height_ratios_list()) {
- layer_info.height_ratios.emplace_back(height_ratio);
- }
- // Simulation info
- assert (1 == info.edge_layer_base().buffer_indices_size());
- layer_info.buffer_indices.cluster_index = info.edge_layer_base().buffer_indices(0).cluster_index();
- layer_info.buffer_indices.index = info.edge_layer_base().buffer_indices(0).index();
-
- for (uint32_t i = 0; i < info.mux_data().number_of_predecessors(); i++) {
- LayerInfo temp_layer = {};
- switch (info.predecessors(i).edge_case()) {
- case ProtoHefEdge::kLayerInfo:
- status = fill_layer_info(info.predecessors(i).layer_info(), edge_connection_type, core_op,
- direction, hw_padding_supported, context_index, partial_network_name, network_index, temp_layer);
- if (HAILO_SUCCESS != status) {
- return status;
- }
- layer_info.predecessor.push_back(temp_layer);
- break;
- case ProtoHefEdge::kLayerMux:
- status = fill_mux_info(info.predecessors(i).layer_mux(), edge_connection_type, core_op,
- direction, hw_padding_supported, context_index, partial_network_name, network_index, temp_layer);
- if (HAILO_SUCCESS != status) {
- return status;
- }
- layer_info.predecessor.push_back(temp_layer);
- break;
- default:
- LOGGER__ERROR("Invalid layer type");
- return HAILO_INTERNAL_FAILURE;
- break;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_boundary_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata)
-{
- auto layer_info = get_boundary_layer_info(core_op, context_index, layer, supported_features);
- CHECK_EXPECTED_AS_STATUS(layer_info);
-
- context_metadata.add_boundary_layer(layer_info.release());
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_inter_context_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata)
-{
- auto layer_info = get_inter_context_layer_info(core_op, context_index, layer, supported_features);
- CHECK_EXPECTED_AS_STATUS(layer_info);
-
- context_metadata.add_inter_context_layer(layer_info.release());
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::fill_ddr_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata)
-{
- auto layer_info = get_ddr_layer_info(core_op, context_index, layer, supported_features);
- CHECK_EXPECTED_AS_STATUS(layer_info);
-
- context_metadata.add_ddr_layer(layer_info.release());
- return HAILO_SUCCESS;
-}
-
-hailo_status HefUtils::check_ddr_pairs_match(
- const std::vector<LayerInfo> &context_ddr_input_layers,
- const std::vector<LayerInfo> &context_ddr_output_layers,
- const uint8_t context_index)
-{
- CHECK(context_ddr_input_layers.size() == context_ddr_output_layers.size(), HAILO_INVALID_HEF,
- "DDR pairs must be equal in size for context {}" ,context_index);
-
- for (auto const &ddr_output_layer : context_ddr_output_layers) {
- auto matching_input_stream = ddr_output_layer.connected_context_info.stream_index;
- bool found_mathing_layer = false;
- for (auto const &ddr_input_layer : context_ddr_input_layers) {
- if (ddr_input_layer.stream_index == matching_input_stream) {
- CHECK(!found_mathing_layer, HAILO_INVALID_HEF, "Found multiple input DDR streams for single ddr output stream");
- found_mathing_layer = true;
- CHECK(ddr_output_layer.nn_stream_config.core_bytes_per_buffer == ddr_input_layer.nn_stream_config.core_bytes_per_buffer,
- HAILO_INVALID_HEF, "both sides for DDR pair must have the same core_bytes_per_buffer.\n"
- "context index {}. Output stream index - {} output side core_bytes_per_buffer - {}."
- "input stream index {}.input size core_bytes_per_buffer - {}",
- context_index, ddr_output_layer.stream_index, ddr_output_layer.nn_stream_config.core_bytes_per_buffer,
- ddr_input_layer.stream_index, ddr_input_layer.nn_stream_config.core_bytes_per_buffer);
- CHECK(ddr_output_layer.ddr_info.total_buffers_per_frame == ddr_input_layer.ddr_info.total_buffers_per_frame,
- HAILO_INVALID_HEF, "both sides for DDR pair must have the same total_buffers_per_frame.\n"
- "context index {}. Output stream index - {} output side total_buffers_per_frame - {}."
- "input stream index {}. input size total_buffers_per_frame - {}",
- context_index, ddr_output_layer.stream_index, ddr_output_layer.ddr_info.total_buffers_per_frame,
- ddr_input_layer.stream_index, ddr_input_layer.ddr_info.total_buffers_per_frame);
- }
- }
- CHECK(found_mathing_layer, HAILO_INVALID_HEF, "didn't find any match for context {} output stream {}", context_index, ddr_output_layer.stream_index);
- }
-
- return HAILO_SUCCESS;
-}
-
-static Expected<ContextSwitchConfigActionPtr> parse_trigger_action(const ProtoHEFTrigger &trigger_proto)
-{
- switch (trigger_proto.trigger_case()) {
- case ProtoHEFTrigger::kTriggerLcu:
- {
- const auto cluster_index = trigger_proto.trigger_lcu().cluster_index();
- const auto lcu_index = trigger_proto.trigger_lcu().lcu_index();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(cluster_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid cluster_index: {}.", cluster_index);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(lcu_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid lcu_index: {}.", lcu_index);
- return WaitForLcuAction::create(static_cast<uint8_t>(cluster_index), static_cast<uint8_t>(lcu_index));
- }
- case ProtoHEFTrigger::kTriggerAllDataWasSent:
- {
- const auto stream_index = trigger_proto.trigger_all_data_was_sent().shmifo_index();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(stream_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid stream_index: {}.", stream_index);
- return WaitOutputTransferDoneAction::create(static_cast<uint8_t>(stream_index));
- }
- case ProtoHEFTrigger::kTriggerDmaIdle:
- {
- const auto stream_index = trigger_proto.trigger_dma_idle().shmifo_index();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(stream_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid stream_index: {}.", stream_index);
- return WaitDmaIdleAction::create(static_cast<uint8_t>(stream_index));
- }
- case ProtoHEFTrigger::kTriggerNms:
- {
- const auto aggregator_index = trigger_proto.trigger_nms().aggregator_index();
- const auto pred_cluster_ob_index = trigger_proto.trigger_nms().pred_cluster_ob_index();
- const auto pred_cluster_ob_cluster_index = trigger_proto.trigger_nms().pred_cluster_ob_cluster_index();
- const auto pred_cluster_ob_interface = trigger_proto.trigger_nms().pred_cluster_ob_interface();
- const auto succ_prepost_ob_index = trigger_proto.trigger_nms().succ_prepost_ob_index();
- const auto succ_prepost_ob_interface = trigger_proto.trigger_nms().succ_prepost_ob_interface();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(aggregator_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid aggregator_index: {}.", aggregator_index);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid pred_cluster_ob_index: {}.", pred_cluster_ob_index);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_cluster_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid pred_cluster_ob_cluster_index: {}.", pred_cluster_ob_cluster_index);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_interface), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid pred_cluster_ob_interface: {}.", pred_cluster_ob_interface);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(succ_prepost_ob_index), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid succ_prepost_ob_index: {}.", succ_prepost_ob_index);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(succ_prepost_ob_interface), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid succ_prepost_ob_interface: {}.", succ_prepost_ob_interface);
-
- return WaitNmsIdleAction::create(static_cast<uint8_t>(aggregator_index),
- static_cast<uint8_t>(pred_cluster_ob_index), static_cast<uint8_t>(pred_cluster_ob_cluster_index),
- static_cast<uint8_t>(pred_cluster_ob_interface), static_cast<uint8_t>(succ_prepost_ob_index),
- static_cast<uint8_t>(succ_prepost_ob_interface));
- }
- case ProtoHEFTrigger::kTriggerAllDataWasReceived:
- {
- LOGGER__ERROR("kTriggerAllDataWasReceived trigger is not supported");
- return make_unexpected(HAILO_NOT_IMPLEMENTED);
- }
- case ProtoHEFTrigger::kTriggerNone:
- {
- return NoneAction::create();
- }
- default:
- LOGGER__ERROR("Unsupported trigger given {}", trigger_proto.trigger_case());
- return make_unexpected(HAILO_INVALID_HEF);
- }
-}
-
-// Parse initial_l3 register from old hef
-constexpr uint32_t HAILO8_INITIAL_L3_CUT_MASK = 0x0000007F;
-constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_MASK = 0x0007FF80L;
-constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_SHIFT = 7;
-constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_BYTES_GRANULARITY_SHIFT = 3;
-
-
-static std::pair<uint8_t, uint16_t> old_hef_parse_initial_l3(uint32_t initial_l3)
-{
- // parse initial l3 as written in hailo8 initial_l3 format -
- // 7 bits of initial_l3_cut
- // 12 bits of initial_l3_offset, offset in 256 bits (8 bytes) granularity.
- const uint8_t initial_l3_cut = static_cast<uint8_t>(initial_l3 & HAILO8_INITIAL_L3_CUT_MASK);
- const uint32_t initial_l3_offset_256 = (initial_l3 & HAILO8_INITIAL_L3_OFFSET_MASK) >> HAILO8_INITIAL_L3_OFFSET_SHIFT;
- const uint16_t initial_l3_offset = static_cast<uint16_t>(initial_l3_offset_256 << HAILO8_INITIAL_L3_OFFSET_BYTES_GRANULARITY_SHIFT);
- return std::make_pair(initial_l3_cut, initial_l3_offset);
-}
-
-static Expected<ContextSwitchConfigActionPtr> parse_action(const ProtoHEFAction &proto_action,
- const SupportedFeatures &supported_features)
-{
- switch (proto_action.action_case()) {
- case ProtoHEFAction::kWriteDataCcw:\
- {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.write_data_ccw().cfg_channel_index()), HAILO_INVALID_HEF,
- "Invalid cfg channel index");
- const auto config_stream_index = static_cast<uint8_t>(proto_action.write_data_ccw().cfg_channel_index());
-
- auto data = Buffer::create(
- reinterpret_cast<const uint8_t*>(proto_action.write_data_ccw().data().data()),
- proto_action.write_data_ccw().data().length());
- CHECK_EXPECTED(data);
-
- return WriteDataCcwAction::create(data.release(), config_stream_index);
- }
- case ProtoHEFAction::kDisableLcu:
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.disable_lcu().cluster_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.disable_lcu().cluster_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.disable_lcu().lcu_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid lcu_index: {}", proto_action.disable_lcu().lcu_index());
- return DisableLcuAction::create(static_cast<uint8_t>(proto_action.disable_lcu().cluster_index()),
- static_cast<uint8_t>(proto_action.disable_lcu().lcu_index()));
- case ProtoHEFAction::kEnableLcu:
- {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_lcu().cluster_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.enable_lcu().cluster_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_lcu().lcu_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid lcu_index: {}.", proto_action.enable_lcu().lcu_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(proto_action.enable_lcu().lcu_kernel_done_address()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid lcu_kernel_done_address: {}.", proto_action.enable_lcu().lcu_kernel_done_address());
-
- auto support_multi_networks = supported_features.multi_network_support;
- auto network_index = static_cast<uint8_t>((support_multi_networks) ? proto_action.enable_lcu().network_index() : 0);
-
- const auto cluster_index = static_cast<uint8_t>(proto_action.enable_lcu().cluster_index());
- const auto lcu_index = static_cast<uint8_t>(proto_action.enable_lcu().lcu_index());
- const auto kernel_done_address = static_cast<uint16_t>(proto_action.enable_lcu().lcu_kernel_done_address());
- const auto kernel_done_count = static_cast<uint32_t>(proto_action.enable_lcu().lcu_kernel_done_count());
-
- return EnableLcuAction::create(cluster_index, lcu_index, network_index, kernel_done_address,
- kernel_done_count);
- }
- case ProtoHEFAction::kEnableSequencer:
- {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_sequencer().cluster_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.enable_sequencer().cluster_index());
-
- // TODO: Remove when impolemeted in the hef.proto
- uint64_t l2_offset_0 = 0;
- uint64_t l2_offset_1 = 0;
- // TODO: Change the CONTEXT_SWITCH__add_enable_sequencer_proto_action func to receive 4 'l2_offset' params
- l2_offset_0 |= (uint64_t)(proto_action.enable_sequencer().l2_write_0());
- l2_offset_0 |= ((uint64_t)(proto_action.enable_sequencer().l2_write_1()) << 32);
- l2_offset_1 |= (uint64_t)(proto_action.enable_sequencer().l2_write_2());
- l2_offset_1 |= ((uint64_t)(proto_action.enable_sequencer().l2_write_3()) << 32);
-
- uint8_t initial_l3_cut = 0;
- uint16_t initial_l3_offset = 0;
- if (proto_action.enable_sequencer().initial_l3_info().includes_initial_l3_info()) {
- const auto &initial_l3_info = proto_action.enable_sequencer().initial_l3_info();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(initial_l3_info.initial_l3_index()), HAILO_INVALID_HEF,
- "Initial l3 cut {} is out of range", initial_l3_info.initial_l3_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(initial_l3_info.initial_l3_offset()), HAILO_INVALID_HEF,
- "Initial l3 offset {} is out of range", initial_l3_info.initial_l3_offset());
- initial_l3_cut = static_cast<uint8_t>(initial_l3_info.initial_l3_index());
- initial_l3_offset = static_cast<uint16_t>(initial_l3_info.initial_l3_offset());
- }
- else {
- // Legacy mode should work only on hailo8
- std::tie(initial_l3_cut, initial_l3_offset) = old_hef_parse_initial_l3(proto_action.enable_sequencer().initial_l3_legacy());
- }
-
- return EnableSequencerAction::create(
- static_cast<uint8_t>(proto_action.enable_sequencer().cluster_index()),
- initial_l3_cut, initial_l3_offset,
- proto_action.enable_sequencer().active_apu_bitmap(),
- proto_action.enable_sequencer().active_ia_bitmap(),
- proto_action.enable_sequencer().active_sc_bitmap(),
- proto_action.enable_sequencer().active_l2_bitmap(),
- l2_offset_0,
- l2_offset_1);
- }
- case ProtoHEFAction::kNone:
- return NoneAction::create();
-
- case ProtoHEFAction::kWaitForSeqeuncer:
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.wait_for_seqeuncer().cluster_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.wait_for_seqeuncer().cluster_index());
-
- return WaitForSequencerAction::create(
- static_cast<uint8_t>(proto_action.wait_for_seqeuncer().cluster_index()));
-
- case ProtoHEFAction::kAllowInputDataflow:
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.allow_input_dataflow().sys_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid sys_index: {}.", proto_action.allow_input_dataflow().sys_index());
- return AllowInputDataflowAction::create(
- static_cast<uint8_t>(proto_action.allow_input_dataflow().sys_index()));
-
- case ProtoHEFAction::kWaitForModuleConfigDone:
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.wait_for_module_config_done().index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid index: {}", proto_action.wait_for_module_config_done().index());
- return WaitForModuleConfigDoneAction::create(
- static_cast<uint8_t>(proto_action.wait_for_module_config_done().index()));
-
- case ProtoHEFAction::kEnableNms:
- {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_nms().nms_unit_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid nms_unit_index: {}.", proto_action.enable_nms().nms_unit_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_nms().network_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid network_index: {}.", proto_action.enable_nms().network_index());
-
- auto support_multi_networks = supported_features.multi_network_support;
- auto network_index = static_cast<uint8_t>((support_multi_networks) ? proto_action.enable_nms().network_index() : 0);
-
- const auto nms_unit_index = static_cast<uint8_t>(proto_action.enable_nms().nms_unit_index());
-
- return EnableNmsAction::create(network_index, nms_unit_index);
- }
-
- default:
- LOGGER__ERROR("Action {} not implemented", proto_action.action_case());
- break;
- }
-
- // Default case
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-static Expected<ContextSwitchOperation> parse_operation(const ProtoHEFOperation &operation_proto,
- const SupportedFeatures &supported_features)
-{
- std::vector<ContextSwitchConfigActionPtr> actions;
- actions.reserve(operation_proto.actions_size() + 1); // +1 for the trigger action
-
- auto trigger_action = parse_trigger_action(operation_proto.trigger());
- CHECK_EXPECTED(trigger_action);
- actions.emplace_back(trigger_action.release());
-
- actions.reserve(operation_proto.actions_size());
- for (const auto &proto_action : operation_proto.actions()) {
- auto action = parse_action(proto_action, supported_features);
- CHECK_EXPECTED(action);
- actions.emplace_back(action.release());
- }
-
- return ContextSwitchOperation(std::move(actions));
-}
-
-static Expected<std::vector<ContextSwitchOperation>> parse_operations(
- const google::protobuf::RepeatedPtrField<ProtoHEFOperation> &operations_proto,
- const SupportedFeatures &supported_features)
-{
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(operations_proto.size()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid operations_count: {}.", operations_proto.size());
- std::vector<ContextSwitchOperation> operations;
- operations.reserve(operations_proto.size());
- for (const auto &operation_proto : operations_proto) {
- auto operation = parse_operation(operation_proto, supported_features);
- CHECK_EXPECTED(operation);
- operations.emplace_back(operation.release());
- }
- return operations;
-}
-
-static hailo_status update_parsing_info(uint8_t cfg_index, uint32_t data_length, ConfigBufferInfoMap &results)
-{
- CHECK(cfg_index < CONTROL_PROTOCOL__MAX_CFG_CHANNELS, HAILO_INVALID_HEF, "Invalid cfg_index");
-
- if (contains(results, cfg_index)) {
- results.at(cfg_index).push_back(data_length);
- return HAILO_SUCCESS;
- }
-
- // If we got here, the current cfg_index's info is parsed for the first time
- results.emplace(cfg_index, std::vector<uint32_t>(1, data_length));
- return HAILO_SUCCESS;
-}
-
-static Expected<ConfigBufferInfoMap> get_config_buffer_info(
- const google::protobuf::RepeatedPtrField<ProtoHEFOperation> &operations)
-{
- auto status = HAILO_UNINITIALIZED;
- ConfigBufferInfoMap results;
-
- for (const auto &operation : operations) {
- for (const auto &action : operation.actions()) {
- if (ProtoHEFAction::kWriteDataCcw == action.action_case()) {
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(action.write_data_ccw().cfg_channel_index()), HAILO_INVALID_HEF,
- "Invalid cfg index {}", action.write_data_ccw().cfg_channel_index());
- status = update_parsing_info(static_cast<uint8_t>(action.write_data_ccw().cfg_channel_index()),
- static_cast<uint32_t>(action.write_data_ccw().data().length()), results);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
- }
- }
- return results;
-}
-
-Expected<PreliminaryContextMetadata> HefUtils::parse_preliminary_context(const ProtoHEFPreliminaryConfig &preliminary_proto,
- const SupportedFeatures &supported_features)
-{
- auto operations = parse_operations(preliminary_proto.operation(), supported_features);
- CHECK_EXPECTED(operations);
-
- auto config_buffer_infos = get_config_buffer_info(preliminary_proto.operation());
- CHECK_EXPECTED(config_buffer_infos);
-
- return PreliminaryContextMetadata(operations.release(), config_buffer_infos.release());
-}
-
-Expected<ContextMetadata> HefUtils::parse_single_dynamic_context(const ProtoHEFCoreOpMock &core_op,
- const ProtoHEFContext &context_proto, uint8_t context_index, const SupportedFeatures &supported_features)
-{
- auto operations = parse_operations(context_proto.operations(), supported_features);
- CHECK_EXPECTED(operations);
-
- auto config_buffer_infos = get_config_buffer_info(context_proto.operations());
- CHECK_EXPECTED(config_buffer_infos);
-
- ContextMetadata context_metadata(operations.release(), config_buffer_infos.release());
-
- for (const auto &edge_layer : context_proto.metadata().edge_layers()) {
- if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- edge_layer.context_switch_info().edge_connection_type()) {
- auto status = fill_boundary_layers_info(core_op, context_index, edge_layer,
- supported_features, context_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__INTERMEDIATE ==
- edge_layer.context_switch_info().edge_connection_type()) {
- auto status = fill_inter_context_layers_info(core_op, context_index, edge_layer,
- supported_features, context_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR ==
- edge_layer.context_switch_info().edge_connection_type()) {
- auto status = fill_ddr_layers_info(core_op, context_index, edge_layer,
- supported_features, context_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
- }
-
- auto status = check_ddr_pairs_match(context_metadata.get_ddr_input_layers(), context_metadata.get_ddr_output_layers(),
- context_index);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
-
- return context_metadata;
-}
-
-static hailo_status validate_unique_boundary_names(const std::vector<ContextMetadata> &contexts_metadata)
-{
- std::unordered_set<std::string> names;
- for (const auto &context_metadata : contexts_metadata) {
- for (const auto &layer_info : context_metadata.get_boundary_input_layers()) {
- CHECK(names.find(layer_info.name) == names.end(), HAILO_INVALID_HEF,
- "Layer name should be unique. name '{}' appears more than once", layer_info.name);
- names.insert(layer_info.name);
- }
-
- for (const auto &layer_info : context_metadata.get_boundary_output_layers()) {
- CHECK(names.find(layer_info.name) == names.end(), HAILO_INVALID_HEF,
- "Layer name should be unique. name '{}' appears more than once", layer_info.name);
- names.insert(layer_info.name);
- }
- }
- return HAILO_SUCCESS;
-}
-
-Expected<std::vector<ContextMetadata>> HefUtils::parse_dynamic_contexts(const ProtoHEFCoreOpMock &core_op, const SupportedFeatures &supported_features)
-{
- std::vector<ContextMetadata> contexts_metadata;
- for (uint8_t context_index = 0; context_index < core_op.contexts.size(); context_index++) {
- auto &context_proto = core_op.contexts[context_index];
- auto context_metadata = parse_single_dynamic_context(core_op, context_proto, context_index, supported_features);
- CHECK_EXPECTED(context_metadata);
- contexts_metadata.emplace_back(context_metadata.release());
- }
-
- const auto status = validate_unique_boundary_names(contexts_metadata);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return contexts_metadata;
-}
-
-Expected<hailo_nms_info_t> HefUtils::parse_proto_nms_info(const ProtoHEFNmsInfo &proto_nms_info)
-{
- hailo_nms_info_t nms_info = {};
- nms_info.number_of_classes = static_cast<uint32_t>(proto_nms_info.number_of_classes());
- nms_info.bbox_size = static_cast<uint32_t>(proto_nms_info.bbox_size());
- nms_info.max_bboxes_per_class = static_cast<uint32_t>(proto_nms_info.max_output_size());
- nms_info.chunks_per_frame = static_cast<uint32_t>(proto_nms_info.input_division_factor());
- if (nms_info.chunks_per_frame == 0) {
- // Old hef, use default value 1
- nms_info.chunks_per_frame = 1;
- }
- nms_info.is_defused = static_cast<bool>(proto_nms_info.is_defused());
- nms_info.defuse_info.class_group_index =
- static_cast<uint32_t>(proto_nms_info.defuse_info().class_group_index());
-
- CHECK_AS_EXPECTED(nms_info.defuse_info.class_group_index < HailoRTCommon::MAX_DEFUSED_LAYER_COUNT,
- HAILO_INVALID_HEF, "class_group_index from HEF is bigger than {}!", HailoRTCommon::MAX_DEFUSED_LAYER_COUNT);
-
- const std::string &original_name = proto_nms_info.defuse_info().original_name();
- CHECK_AS_EXPECTED(HAILO_MAX_STREAM_NAME_SIZE >= (original_name.length() + 1), HAILO_INTERNAL_FAILURE,
- "original_name field '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE including the null terminated character)",
- original_name);
- strncpy(nms_info.defuse_info.original_name, original_name.c_str(), original_name.length() + 1);
- return nms_info;
-}
-
-Expected<LayerInfo> HefUtils::get_boundary_layer_info(const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
-{
- // We parse only boundary layers for user usage
- CHECK_AS_EXPECTED(
- ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY == layer.context_switch_info().edge_connection_type(),
- HAILO_INTERNAL_FAILURE, "get_layer_info can be called only on boundary layers");
-
- LayerInfo result = {};
- const auto direction =
- (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) ?
- HAILO_D2H_STREAM : HAILO_H2D_STREAM;
- auto support_multi_networks = supported_features.multi_network_support;
- auto network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
- auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, network_index, supported_features);
- CHECK_EXPECTED(partial_network_name);
- const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
- if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()) {
- // TODO: return LayerInfo
- auto status = fill_layer_info(layer.layer_info(), layer.context_switch_info().edge_connection_type(),
- core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result);
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()) {
- // TODO: return LayerInfo
- auto status = fill_mux_info(layer.layer_mux(), layer.context_switch_info().edge_connection_type(),
- core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result);
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else {
- LOGGER__ERROR("Invalid layer type");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-
- result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
- layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;
-
- if (layer.has_pad_index()) {
- result.pad_index = layer.pad_index();
- }
-
- return result;
-}
-
-static Expected<ConnectedContextInfo> parse_connected_context_info(
- const ProtoHEFConnectedContextInfo &connected_context_proto)
-{
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.sys_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid connected_sys_index: {}.", connected_context_proto.sys_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.engine_id()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid engine_id: {}. in connected_contexts", connected_context_proto.engine_id());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid connected_context_index: {}.", connected_context_proto.index());
-
- ConnectedContextInfo connected_context{};
- connected_context.context_index = static_cast<uint8_t>(connected_context_proto.index());
- connected_context.stream_index = static_cast<uint8_t>(connected_context_proto.sys_index());
- connected_context.dma_engine_index = static_cast<uint8_t>(connected_context_proto.engine_id());
- return connected_context;
-}
-
-Expected<LayerInfo> HefUtils::get_inter_context_layer_info(const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
-{
- LayerInfo result = {};
- CHECK_AS_EXPECTED(PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type(), HAILO_INVALID_HEF, "Inter-context layer can't be mux.");
-
- result.type = LayerType::INTER_CONTEXT;
- auto support_multi_networks = supported_features.multi_network_support;
- result.network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
- auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, result.network_index, supported_features);
- CHECK_EXPECTED(partial_network_name);
- result.network_name = HefUtils::get_network_name(core_op, partial_network_name.release());
- result.context_index = context_index;
- const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
- result.name = layer.layer_info().name();
- auto nn_stream_config_exp = HefConfigurator::parse_nn_stream_config(layer.layer_info().edge_layer_base(),
- hw_padding_supported, layer.context_switch_info().edge_connection_type());
- CHECK_EXPECTED(nn_stream_config_exp);
- result.nn_stream_config = nn_stream_config_exp.release();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().sys_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid sys_index: {}.", layer.layer_info().edge_layer_base().sys_index());
- result.stream_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().sys_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().engine_id()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid engine_id: {}.", layer.layer_info().edge_layer_base().engine_id());
- result.dma_engine_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().engine_id());
-
- result.max_shmifo_size = layer.layer_info().edge_layer_base().max_shmifo_size();
-
- result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
- layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;
-
- // HRT-7201 - The system supports one src and multiple dstinations. Right now we're saving only one dstination
- CHECK_AS_EXPECTED(layer.context_switch_info().connected_contexts_size() >= 1, HAILO_INVALID_HEF,
- "Inter context layer info must contain connected_context");
- auto connected_context = parse_connected_context_info(layer.context_switch_info().connected_contexts(0));
- CHECK_EXPECTED(connected_context);
- result.connected_context_info = connected_context.release();
-
- return result;
-}
-
-Expected<LayerInfo> HefUtils::get_ddr_layer_info(const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
-{
- LayerInfo result = {};
- CHECK_AS_EXPECTED(PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type(), HAILO_INVALID_HEF, "DDR layer can't be mux.");
-
- result.type = LayerType::DDR;
-
- auto support_multi_networks = supported_features.multi_network_support;
- result.network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
- auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, result.network_index, supported_features);
- CHECK_EXPECTED(partial_network_name);
- result.network_name = HefUtils::get_network_name(core_op, partial_network_name.release());
- result.context_index = context_index;
- const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
- result.name = layer.layer_info().name();
- auto nn_stream_config_exp = HefConfigurator::parse_nn_stream_config(layer.layer_info().edge_layer_base(),
- hw_padding_supported, layer.context_switch_info().edge_connection_type());
- CHECK_EXPECTED(nn_stream_config_exp);
- result.nn_stream_config = nn_stream_config_exp.release();
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().sys_index()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid sys_index: {}.", layer.layer_info().edge_layer_base().sys_index());
- result.stream_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().sys_index());
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().engine_id()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid engine_id: {}.", layer.layer_info().edge_layer_base().engine_id());
- result.dma_engine_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().engine_id());
- result.max_shmifo_size = layer.layer_info().edge_layer_base().max_shmifo_size();
-
- CHECK_AS_EXPECTED(layer.context_switch_info().connected_contexts_size() == 1, HAILO_INVALID_HEF,
- "Only single connected context is supported on DDR channels");
- auto connected_context = parse_connected_context_info(layer.context_switch_info().connected_contexts(0));
- CHECK_EXPECTED(connected_context);
- CHECK_AS_EXPECTED(context_index == connected_context->context_index,
- HAILO_INVALID_HEF, "for ddr layer, connected_context_index must be same to the edge layer's context");
- result.connected_context_info = connected_context.release();
-
- result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
- layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;
-
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(layer.layer_info().edge_layer_base().core_buffers_per_frame()), HAILO_INVALID_HEF,
- "Failed to parse HEF. Invalid core_buffers_per_frame: {}.", layer.layer_info().edge_layer_base().core_buffers_per_frame());
- result.ddr_info.total_buffers_per_frame = static_cast<uint16_t>(layer.layer_info().edge_layer_base().core_buffers_per_frame());
-
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(layer.context_switch_info().buffers()), HAILO_INVALID_HEF,
- "calculated number of transfers for DDR buffer is out of UINT16_T range");
- result.ddr_info.min_buffered_rows = static_cast<uint16_t>(layer.context_switch_info().buffers());
-
- return result;
-}
-
-Expected<std::vector<std::string>> HefUtils::get_sorted_output_names(const ProtoHEFCoreOpMock &core_op)
-{
- if (core_op.fused_layers_metadata.network_has_fused_layers()) {
- return std::vector<std::string>(std::begin(core_op.fused_layers_metadata.updated_sorted_output_names()),
- std::end(core_op.fused_layers_metadata.updated_sorted_output_names()));
- } else if (0 != core_op.sorted_outputs_order.size()) {
- // For backwards compatibility before we've added updated_sorted_output_names
- return std::vector<std::string>(std::begin(core_op.sorted_outputs_order),
- std::end(core_op.sorted_outputs_order));
- } else {
- // For backwards compatibility before we've added this field
- uint32_t number_of_contexts = core_op.contexts.size();
- const auto& context_metadata = core_op.contexts[number_of_contexts - 1].metadata();
-
- CHECK_AS_EXPECTED(0 < context_metadata.sorted_outputs_order_size(), HAILO_INVALID_HEF,
- "Sorted output names is not set up in the HEF.");
-
- return std::vector<std::string>(std::begin(context_metadata.sorted_outputs_order()),
- std::end(context_metadata.sorted_outputs_order()));
- }
-}
-
-Expected<std::string> HefUtils::get_partial_network_name_by_index(const ProtoHEFCoreOpMock &core_op, uint8_t network_index,
- const SupportedFeatures &supported_features)
-{
- if (supported_features.multi_network_support) {
- CHECK_AS_EXPECTED(network_index < core_op.networks_names.size(), HAILO_INVALID_ARGUMENT,
- "Requested name for network_index={}, however there are only {} networks in the network group",
- network_index, core_op.networks_names.size());
- return std::string(core_op.networks_names[network_index]);
- } else {
- auto partial_network_name = core_op.network_group_metadata.network_group_name();
- return partial_network_name;
- }
-}
-
-std::string HefUtils::get_network_group_name(const ProtoHEFNetworkGroup &net_group, const SupportedFeatures &/*supported_features*/)
-{
- if (!net_group.partial_network_groups().empty()) {
- return net_group.partial_network_groups(0).network_group().network_group_metadata().network_group_name();
- }
- return net_group.network_group_metadata().network_group_name();
-}
-
-std::string HefUtils::get_network_name(const std::string &net_group_name, const std::string &partial_network_name)
-{
- return net_group_name + HAILO_DEFAULT_NETWORK_NAME_QUALIFIER + partial_network_name;
-}
-
-std::string HefUtils::get_network_name(const ProtoHEFCoreOpMock &core_op, const std::string &partial_network_name)
-{
- return HefUtils::get_network_name(core_op.network_group_metadata.network_group_name(), partial_network_name);
-}
-
-Expected<std::shared_ptr<ProtoHEFCoreOpMock>> Hef::Impl::get_core_op_per_arch(const ProtoHEFCoreOpMock &core_op,
- ProtoHEFHwArch hef_arch, hailo_device_architecture_t device_arch, uint32_t partial_clusters_layout_bitmap)
-{
- if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == hef_arch) {
- // Hailo8 can work with Hailo8L configurations. in that case we choose one of the configurations
- for (auto &partial_core_op : core_op.partial_core_ops) {
- if (partial_clusters_layout_bitmap == partial_core_op->layout.partial_clusters_layout_bitmap()
- || (HAILO_ARCH_HAILO8 == device_arch)) {
- return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
- }
- }
- LOGGER__ERROR("There is no matching partial_clusters_layout_bitmap configuration in the given HEF");
- return make_unexpected(HAILO_INVALID_HEF);
- } else {
- return std::make_shared<ProtoHEFCoreOpMock>(core_op);
- }
-}
-
-Expected<std::vector<std::string>> Hef::Impl::get_sorted_output_names(const std::string &net_group_name)
-{
- if (m_supported_features.hailo_net_flow) {
- std::vector<std::string> res;
- for (const auto &net_group : m_groups) {
- auto curr_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
- if (curr_name == net_group_name) {
- res.reserve(net_group->sorted_outputs_order().size());
- for (auto &name : net_group->sorted_outputs_order()) {
- res.push_back(name);
- }
- return res;
- }
- }
- LOGGER__ERROR("Did not find network group of name {}", net_group_name);
- return make_unexpected(HAILO_INVALID_HEF);
- }
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- auto res = network_group_metadata->get_sorted_output_names();
- return res;
-}
-
-static Expected<WriteMemoryInfo> parse_ccw_buffer(const std::string &ccw_buffer)
-{
- WriteMemoryInfo write_memory_info = {};
- CHECK_AS_EXPECTED(ccw_buffer.size() > CCW_DATA_OFFSET, HAILO_INVALID_HEF, "ccw buffer is too small");
- CcwHeader *header = (CcwHeader*)(ccw_buffer.data());
-
- uint32_t words_count = header->words_count + 1;
- auto data_length = words_count * CCW_BYTES_IN_WORD;
- write_memory_info.address = header->address;
-
- // Validation for ccw size
- size_t expected_ccw_data_length = (ccw_buffer.length() - CCW_DATA_OFFSET);
- if (0 != (words_count % 2)) {
- expected_ccw_data_length -= CCW_BYTES_IN_WORD;
- }
- CHECK_AS_EXPECTED(data_length == expected_ccw_data_length, HAILO_INVALID_HEF,
- "Invalid ccw buffer was parsed from HEF");
-
- auto data_buff = Buffer::create(reinterpret_cast<const uint8_t*>(ccw_buffer.data() + CCW_DATA_OFFSET), data_length);
- CHECK_EXPECTED(data_buff);
- write_memory_info.data = data_buff.release();
-
- return write_memory_info;
-}
-
-/* HcpConfigNetworkGroup funcs */
-
-Expected<std::vector<WriteMemoryInfo>> Hef::Impl::create_single_context_network_group_config(const ProtoHEFPreliminaryConfig& proto_config)
-{
- std::vector<WriteMemoryInfo> config_buffers;
-
- for (const auto &operation : proto_config.operation()) {
- switch (operation.trigger().trigger_case()) {
- case ProtoHEFTrigger::kTriggerNone: {
- break;
- }
- default: {
- LOGGER__ERROR("Triggers different from 'ProtoHEFTriggerNone' are not supported");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
- }
-
- for (const auto &action : operation.actions()) {
- switch (action.action_case()) {
- case ProtoHEFAction::kNone: {
- break;
- }
- case ProtoHEFAction::kWriteData: {
- WriteMemoryInfo write_memory_info = {};
- write_memory_info.address = static_cast<uint32_t>(action.write_data().address());
- auto data_buff = Buffer::create(
- reinterpret_cast<const uint8_t*>(action.write_data().data().data()),
- action.write_data().data().length());
- CHECK_EXPECTED(data_buff);
- write_memory_info.data = data_buff.release();
- config_buffers.emplace_back(std::move(write_memory_info));
- break;
- }
- case ProtoHEFAction::kWriteDataCcw: {
- auto config_buffer = parse_ccw_buffer(action.write_data_ccw().data());
- CHECK_EXPECTED(config_buffer);
- config_buffers.emplace_back(config_buffer.release());
- break;
- }
- case ProtoHEFAction::kDisableLcu: {
- // We ignore this action. the lcu_disable will happen in the nn_core reset before configuring specific network_group
- break;
- }
- case ProtoHEFAction::kEnableLcu: {
- WriteMemoryInfo write_memory_info = {};
- write_memory_info.address = action.enable_lcu().lcu_enable_address();
- auto data_buff = Buffer::create(ENABLE_LCU_CONTROL_WORD, sizeof(ENABLE_LCU_CONTROL_WORD));
- CHECK_EXPECTED(data_buff);
- write_memory_info.data = data_buff.release();
- config_buffers.emplace_back(std::move(write_memory_info));
- break;
- }
- case ProtoHEFAction::kAllowInputDataflow: {
- case ProtoHEFAction::kWaitForModuleConfigDone:
- // We ignore the 'wait_for_interrupt' actions. After writing the configurations we can be sure everything is configured and dont need to wait for interrupts
- break;
- }
- case ProtoHEFAction::kWaitForSeqeuncer: {
- case ProtoHEFAction::kEnableSequencer:
- LOGGER__ERROR("Parsing error. Sequencer related actions are not supported over Ethernet. "
- "If you use the Ethernet interface, please disable the Sequencer in the Dataflow Compiler (SDK) and then re-create the HEF. "
- "Disabling the Sequencer is done using the hef_param command in the model script (ALLS file). "
- "See the Dataflow Compiler user guide for more information.");
- return make_unexpected(HAILO_INVALID_HEF);
- }
- default: {
- LOGGER__ERROR("Invalid action");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
- }
- }
- }
-
- return config_buffers;
-}
-
-ProtoHEFHwArch Hef::Impl::get_device_arch()
-{
- return m_header.hw_arch();
-}
-
-Expected<float64_t> Hef::Impl::get_bottleneck_fps(const std::string &net_group_name)
-{
- auto core_op = get_core_op_by_net_group_name(net_group_name);
- CHECK_EXPECTED(core_op);
- return core_op.value()->network_group_metadata.bottleneck_fps();
-}
-
-bool Hef::Impl::contains_ddr_layers(const ProtoHEFCoreOpMock& core_op)
-{
- for (auto &context : core_op.contexts) {
- for (auto &layer : context.metadata().edge_layers()) {
- if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR ==
- layer.context_switch_info().edge_connection_type()) {
- return true;
- }
- }
- }
- return false;
-}
-
-Expected<std::vector<std::string>> Hef::Impl::get_stream_names_from_vstream_name(const std::string &vstream_name,
- const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- return network_group_metadata->get_stream_names_from_vstream_name(vstream_name);
-}
-
-Expected<std::vector<std::string>> Hef::Impl::get_vstream_names_from_stream_name(const std::string &stream_name,
- const std::string &net_group_name)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- return network_group_metadata->get_vstream_names_from_stream_name(stream_name);
-}
-
-Expected<std::string> Hef::Impl::get_vstream_name_from_original_name_mux(const std::string &original_name, const ProtoHefEdge &layer)
-{
- switch (layer.edge_case()) {
- case ProtoHefEdge::kLayerInfo:
- for (const auto &name : layer.layer_info().original_names()) {
- if (original_name == name) {
- return std::string(layer.layer_info().name());
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
- case ProtoHefEdge::kLayerMux:
- for (const auto &pred : layer.layer_mux().predecessors()) {
- auto res = get_vstream_name_from_original_name_mux(original_name, pred);
- if (res) {
- return std::move(res.value());
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
- default:
- LOGGER__ERROR("Invalid layer type");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-}
-
-Expected<std::string> Hef::Impl::get_vstream_name_from_original_name(const std::string &original_name,
- const std::string &net_group_name)
-{
- auto core_op = get_core_op_by_net_group_name(net_group_name);
- CHECK_EXPECTED(core_op);
-
- std::string results;
-
- for (const auto &context : core_op.value()->contexts) {
- for (const auto &layer_info : context.metadata().edge_layers()) {
- if ((is_h2d_boundary_info_layer(layer_info)) || (is_d2h_boundary_info_layer(layer_info))) {
- for (auto &name : layer_info.layer_info().original_names()) {
- if (original_name == name) {
- CHECK_AS_EXPECTED(results.empty(), HAILO_INVALID_HEF, "Original name {} appears more than once in the HEF.", original_name);
- results = std::string(layer_info.layer_info().name());
- }
- }
- } else if(is_d2h_boundary_mux_layer(layer_info)) {
- for (auto &pred : layer_info.layer_mux().predecessors()) {
- auto stream_name = get_vstream_name_from_original_name_mux(original_name, pred);
- if (stream_name) {
- CHECK_AS_EXPECTED(results.empty(), HAILO_INVALID_HEF, "Original name {} appears more than once in the HEF.", original_name);
- results = stream_name.value();
- }
- }
- }
- }
- }
- CHECK_AS_EXPECTED(!results.empty(), HAILO_NOT_FOUND);
- return results;
-}
-
-Expected<std::vector<std::string>> Hef::Impl::get_original_names_from_vstream_name_mux(const std::string &vstream_name, const ProtoHefEdge &layer)
-{
- switch (layer.edge_case()) {
- case ProtoHefEdge::kLayerInfo:
- {
- if (vstream_name == layer.layer_info().name()) {
- std::vector<std::string> results;
- for (const auto &name : layer.layer_info().original_names()) {
- results.push_back(name);
- }
- return results;
- }
- return make_unexpected(HAILO_NOT_FOUND);
- }
- case ProtoHefEdge::kLayerMux:
- for (const auto &pred : layer.layer_mux().predecessors()) {
- auto res = get_original_names_from_vstream_name_mux(vstream_name, pred);
- if (res) {
- return std::move(res.value());
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
- default:
- LOGGER__ERROR("Invalid layer type");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-}
-
-Expected<std::vector<std::string>> Hef::Impl::get_original_names_from_vstream_name(const std::string &vstream_name,
- const std::string &net_group_name)
-{
- auto copre_op = get_core_op_by_net_group_name(net_group_name);
- CHECK_EXPECTED(copre_op);
-
- std::vector<std::string> results;
-
- for (const auto &context : copre_op.value()->contexts) {
- for (const auto &layer_info : context.metadata().edge_layers()) {
- if ((is_h2d_boundary_info_layer(layer_info)) || (is_d2h_boundary_info_layer(layer_info))) {
- if (vstream_name == layer_info.layer_info().name()) {
- for (const auto &name : layer_info.layer_info().original_names()) {
- results.push_back(name);
- }
- return results;
- }
- } else if(is_d2h_boundary_mux_layer(layer_info)) {
- for (const auto &pred : layer_info.layer_mux().predecessors()) {
- auto names = get_original_names_from_vstream_name_mux(vstream_name, pred);
- if (names) {
- return std::move(names.value());
- }
- }
- }
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-hailo_status Hef::Impl::validate_core_op_unique_layer_names(const ProtoHEFCoreOpMock &core_op)
-{
- std::set<std::string> edge_layer_names;
- std::string layer_name;
- for (auto &context : core_op.contexts) {
- for (auto &layer : context.metadata().edge_layers()) {
- // TODO: remove check for boundary layer after fix will be pushed in SDK
- if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- layer.context_switch_info().edge_connection_type()) {
- if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()) {
- layer_name = layer.layer_info().name();
- } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()) {
- layer_name = layer.layer_mux().name();
- } else {
- LOGGER__ERROR("Invalid layer type.");
- return HAILO_INVALID_HEF;
- }
- CHECK(!contains(edge_layer_names, layer_name), HAILO_INVALID_HEF,
- "layer_name should be unique. {} appears more than once in the given network_group.",
- layer_name);
- edge_layer_names.insert(layer_name);
- }
- }
- }
- return HAILO_SUCCESS;
-}
-
-std::vector<std::string> Hef::get_network_groups_names()
-{
- return pimpl->get_network_groups_names();
-}
-
-Expected<NetworkGroupsParamsMap> Hef::create_configure_params(hailo_stream_interface_t stream_interface)
-{
- NetworkGroupsParamsMap results;
- for (const auto &name : pimpl->get_network_groups_names()) {
- auto params = create_configure_params(stream_interface, name);
- CHECK_EXPECTED(params);
- results.emplace(std::make_pair(name, params.release()));
- }
- return results;
-}
-
-Expected<ConfigureNetworkParams> Hef::create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_group_name)
-{
- return pimpl->create_configure_params(stream_interface, network_group_name);
-}
-
-Expected<NetworkGroupsParamsMap> Hef::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params)
-{
- NetworkGroupsParamsMap results;
- for (const auto &name : pimpl->get_network_groups_names()) {
- auto params = create_configure_params_mipi_input(output_interface, mipi_params, name);
- CHECK_EXPECTED(params);
- results.emplace(std::make_pair(name, params.release()));
- }
- return results;
-}
-
-
-Expected<ConfigureNetworkParams> Hef::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_group_name)
-{
- return pimpl->create_configure_params_mipi_input(output_interface, mipi_params, network_group_name);
-}
-
-std::string Hef::hash() const
-{
- const auto &md5 = pimpl->md5();
- const bool LOWERCASE = false;
- return StringUtils::to_hex_string(md5, MD5_DIGEST_LENGTH, LOWERCASE);
-}
-
-std::vector<std::string> Hef::Impl::get_network_groups_names()
-{
- std::vector<std::string> results;
- results.reserve(m_groups.size());
-
- for (const auto &net_group : m_groups) {
- auto &network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
- net_group->partial_network_groups(0).network_group().network_group_metadata().network_group_name()
- : net_group->network_group_metadata().network_group_name();
- results.push_back(network_group_name);
- }
- return results;
-}
-
-Expected<std::vector<hailo_network_group_info_t>> Hef::get_network_groups_infos()
-{
- return pimpl->get_network_groups_infos();
-}
-
-Expected<std::vector<hailo_network_group_info_t>> Hef::Impl::get_network_groups_infos()
-{
- std::vector<hailo_network_group_info_t> results;
- results.reserve(m_core_ops_per_group.size());
-
- for (const auto &group_name_to_core_op : m_core_ops_per_group) {
- const auto &core_op = group_name_to_core_op.second[0];
- hailo_network_group_info_t info = {};
- auto &network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
- core_op.partial_core_ops[0]->core_op->network_group_metadata.network_group_name()
- : core_op.network_group_metadata.network_group_name();
- CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_GROUP_NAME_SIZE >= (network_group_name.length() + 1), HAILO_INTERNAL_FAILURE,
- "The network group '{}' has a too long name (max is HAILO_MAX_NETWORK_GROUP_NAME_SIZE)", network_group_name);
- strncpy(info.name, network_group_name.c_str(), network_group_name.length() + 1);
- info.is_multi_context = (1 < core_op.contexts.size());
- results.push_back(info);
- }
- return results;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> Hef::make_input_vstream_params(
- const std::string &name, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
- uint32_t queue_size)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->make_input_vstream_params(network_pair.value().first, network_pair.value().second, quantized, format_type,
- timeout_ms, queue_size);
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> Hef::Impl::make_input_vstream_params(
- const std::string &net_group_name, const std::string &network_name, bool quantized,
- hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- std::map<std::string, hailo_vstream_params_t> input_vstreams_params;
- auto status = fill_missing_input_vstream_params_with_default(net_group_name,
- network_name, input_vstreams_params, quantized, format_type, timeout_ms, queue_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return input_vstreams_params;
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> Hef::make_output_vstream_params(
- const std::string &name, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
- uint32_t queue_size)
-{
- auto network_pair = pimpl->get_network_group_and_network_name(name);
- CHECK_EXPECTED(network_pair);
-
- return pimpl->make_output_vstream_params(network_pair.value().first, network_pair.value().second, quantized, format_type,
- timeout_ms, queue_size);
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> Hef::Impl::make_output_vstream_params(
- const std::string &net_group_name, const std::string &network_name, bool quantized,
- hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- std::map<std::string, hailo_vstream_params_t> output_vstreams_params;
- auto status = fill_missing_output_vstream_params_with_default(net_group_name,
- network_name, output_vstreams_params, quantized, format_type, timeout_ms, queue_size);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return output_vstreams_params;
-}
-
-hailo_status Hef::Impl::fill_missing_input_vstream_params_with_default(const std::string &net_group_name,
- const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED_AS_STATUS(network_group_metadata);
- auto input_vstream_infos = network_group_metadata->get_input_vstream_infos(network_name);
- CHECK_EXPECTED_AS_STATUS(input_vstream_infos);
-
- return fill_missing_vstream_params_with_default(input_vstreams_params, input_vstream_infos.value(),
- quantized, format_type, timeout_ms, queue_size);
-}
-
-hailo_status Hef::Impl::fill_missing_output_vstream_params_with_default(const std::string &net_group_name,
- const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &output_vstream_params,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED_AS_STATUS(network_group_metadata);
- auto output_vstream_infos = network_group_metadata->get_output_vstream_infos(network_name);
- CHECK_EXPECTED_AS_STATUS(output_vstream_infos);
-
- return fill_missing_vstream_params_with_default(output_vstream_params, output_vstream_infos.value(),
- quantized, format_type, timeout_ms, queue_size);
-}
-
-hailo_status Hef::Impl::fill_missing_vstream_params_with_default(std::map<std::string, hailo_vstream_params_t> &vstream_params,
- std::vector<hailo_vstream_info_t> &vstream_infos, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
- uint32_t queue_size)
-{
- hailo_format_flags_t flags = static_cast<hailo_format_flags_t>(HAILO_FORMAT_FLAGS_NONE);
- if (quantized) {
- flags = static_cast<hailo_format_flags_t>(flags | HAILO_FORMAT_FLAGS_QUANTIZED);
- }
- for (const auto &vstream_info : vstream_infos) {
- std::string vstream_name(vstream_info.name);
- if (contains(vstream_params, vstream_name)) {
- continue;
- }
- hailo_vstream_params_t params{};
- params.user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
- params.user_buffer_format.type = format_type;
- params.user_buffer_format.flags = flags;
- params.timeout_ms = timeout_ms;
- params.queue_size = queue_size;
- vstream_params.insert(std::make_pair(vstream_name, params));
- }
- return HAILO_SUCCESS;
-}
-
-Expected<ConfigureNetworkParams> Hef::Impl::create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_group_name)
-{
- auto params = HailoRTDefaults::get_configure_params();
- auto stream_params_by_name = create_stream_parameters_by_name(network_group_name, stream_interface);
- CHECK_EXPECTED(stream_params_by_name);
- params.stream_params_by_name = stream_params_by_name.release();
- auto network_params_by_name = create_network_parameters_by_name(network_group_name);
- CHECK_EXPECTED(network_params_by_name);
- params.network_params_by_name = network_params_by_name.release();
-
- return params;
-}
-
-Expected<ConfigureNetworkParams> Hef::Impl::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_group_name)
-{
- auto params = HailoRTDefaults::get_configure_params();
- auto stream_params_by_name = create_stream_parameters_by_name_mipi_input(network_group_name, output_interface, mipi_params);
- CHECK_EXPECTED(stream_params_by_name);
- params.stream_params_by_name = stream_params_by_name.release();
- auto network_params_by_name = create_network_parameters_by_name(network_group_name);
- CHECK_EXPECTED(network_params_by_name);
- params.network_params_by_name = network_params_by_name.release();
-
- return params;
-}
-
-Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::create_stream_parameters_by_name(
- const std::string &net_group_name, hailo_stream_interface_t stream_interface)
-{
- auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(network_group_name_pair);
- auto net_group_name_str = network_group_name_pair->first;
-
- return pimpl->create_stream_parameters_by_name(net_group_name_str, stream_interface);
-}
-
-Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::Impl::create_stream_parameters_by_name(
- const std::string &net_group_name, hailo_stream_interface_t stream_interface)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- std::map<std::string, hailo_stream_parameters_t> results;
- for (auto &input_layer : network_group_metadata->get_input_layer_infos()) {
- auto params = HailoRTDefaults::get_stream_parameters(stream_interface, HAILO_H2D_STREAM);
- CHECK_EXPECTED(params);
- results.emplace(std::make_pair(input_layer.name, params.release()));
- }
- for (auto &output_layer : network_group_metadata->get_output_layer_infos()) {
- auto params = HailoRTDefaults::get_stream_parameters(stream_interface, HAILO_D2H_STREAM);
- CHECK_EXPECTED(params);
- results.emplace(std::make_pair(output_layer.name, params.release()));
- }
-
- return results;
-}
-
-Expected<std::map<std::string, hailo_network_parameters_t>> Hef::create_network_parameters_by_name(
- const std::string &net_group_name)
-{
- return pimpl->create_network_parameters_by_name(net_group_name);
-}
-
-Expected<std::map<std::string, hailo_network_parameters_t>> Hef::Impl::create_network_parameters_by_name(
- const std::string &net_group_name)
-{
- auto core_op = get_core_op_by_net_group_name(net_group_name);
- CHECK_EXPECTED(core_op);
-
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- std::map<std::string, hailo_network_parameters_t> results;
-
- if (network_group_metadata->supported_features().multi_network_support) {
- CHECK_AS_EXPECTED((core_op.value()->networks_names.size() != 0), HAILO_INTERNAL_FAILURE,
- "Hef support multiple networks, but no networks found in the proto");
- for (const auto &partial_network_name : core_op.value()->networks_names) {
- auto network_name = HefUtils::get_network_name(net_group_name, partial_network_name);
- auto params = HailoRTDefaults::get_network_parameters();
- results.emplace(std::make_pair(network_name, params));
- }
- } else {
- /* For hefs without the "networks_names" field, build default network name with default params */
- auto params = HailoRTDefaults::get_network_parameters();
- auto network_name = HailoRTDefaults::get_network_name(net_group_name);
- results.emplace(std::make_pair(network_name, params));
- }
-
- return results;
-}
-
-Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::create_stream_parameters_by_name_mipi_input(
- const std::string &net_group_name, hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params)
-{
- auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
- CHECK_EXPECTED(network_group_name_pair);
- auto net_group_name_str = network_group_name_pair->first;
-
- return pimpl->create_stream_parameters_by_name_mipi_input(net_group_name_str, output_interface, mipi_params);
-}
-
-Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::Impl::create_stream_parameters_by_name_mipi_input(
- const std::string &net_group_name, hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params)
-{
- auto network_group_metadata = get_network_group_metadata(net_group_name);
- CHECK_EXPECTED(network_group_metadata);
-
- std::map<std::string, hailo_stream_parameters_t> results;
- for (auto &input_layer : network_group_metadata->get_input_layer_infos()) {
- hailo_stream_parameters_t params = {};
- params.direction = HAILO_H2D_STREAM;
- params.stream_interface = HAILO_STREAM_INTERFACE_MIPI;
- params.mipi_input_params = mipi_params;
- results.emplace(std::make_pair(input_layer.name, params));
- }
- for (auto &output_layer : network_group_metadata->get_output_layer_infos()) {
- auto params = HailoRTDefaults::get_stream_parameters(output_interface, HAILO_D2H_STREAM);
- CHECK_EXPECTED(params);
- results.emplace(std::make_pair(output_layer.name, params.release()));
- }
-
- return results;
-}
-
-} /* namespace hailort */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hef.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/core_op_metadata.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/context_switch_actions.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file context_switch_actions.cpp
+ * @brief Contains classes representing the context switch actions (actions found in the HEFs
+ * and actions sent to the fw).
+ **/
+
+#include "context_switch_actions.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+#include "context_switch_defs.h"
+
+namespace hailort
+{
+
+
+static uint8_t pack_vdma_channel_id(const vdma::ChannelId &channel_id)
+{
+ return static_cast<uint8_t>(channel_id.channel_index |
+ (channel_id.engine_index << CONTEXT_SWITCH_DEFS__PACKED_VDMA_CHANNEL_ID__ENGINE_INDEX_SHIFT));
+}
+
+static uint8_t pack_lcu_id(uint8_t cluster_index, uint8_t lcu_index)
+{
+ return static_cast<uint8_t>(lcu_index |
+ (cluster_index << CONTEXT_SWITCH_DEFS__PACKED_LCU_ID_CLUSTER_INDEX_SHIFT));
+}
+
+ContextSwitchConfigAction::ContextSwitchConfigAction(Type type) :
+ ContextSwitchConfigAction(type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT)
+{}
+
+ContextSwitchConfigAction::ContextSwitchConfigAction(Type type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_list_type) :
+ m_type(type),
+ m_action_list_type(action_list_type)
+{}
+
+Expected<std::vector<Buffer>> ContextSwitchConfigAction::serialize(const ContextResources &context_resources) const
+{
+ CHECK_AS_EXPECTED(m_action_list_type < CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INTERNAL_FAILURE,
+ "Action cannot be serialized");
+
+ auto header = serialize_header();
+ CHECK_EXPECTED(header);
+
+ auto params = serialize_params(context_resources);
+ CHECK_EXPECTED(params);
+
+ auto serialized_action = Buffer::create(header->size() + params->size());
+ CHECK_EXPECTED(serialized_action);
+
+ std::copy(header->begin(), header->end(), serialized_action->data());
+ std::copy(params->begin(), params->end(), serialized_action->data() + header->size());
+
+ std::vector<Buffer> buffers;
+ buffers.emplace_back(serialized_action.release());
+ return buffers;
+}
+
+ContextSwitchConfigAction::Type ContextSwitchConfigAction::get_type() const
+{
+ return m_type;
+}
+
+CONTEXT_SWITCH_DEFS__ACTION_TYPE_t ContextSwitchConfigAction::get_action_list_type() const
+{
+ return m_action_list_type;
+}
+
+Expected<Buffer> ContextSwitchConfigAction::serialize_header() const
+{
+ CHECK_AS_EXPECTED(m_action_list_type != CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INTERNAL_FAILURE,
+ "Action cannot be serialized");
+ CONTEXT_SWITCH_DEFS__common_action_header_t header{};
+ header.action_type = m_action_list_type;
+ header.time_stamp = CONTEXT_SWITCH_DEFS__TIMESTAMP_INIT_VALUE;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&header), sizeof(header));
+}
+
+Expected<ContextSwitchConfigActionPtr> NoneAction::create()
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) NoneAction());
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+NoneAction::NoneAction() :
+ ContextSwitchConfigAction(Type::None)
+{}
+
+Expected<std::vector<Buffer>> NoneAction::serialize(const ContextResources &) const
+{
+ // Do nothing
+ return std::vector<Buffer>();
+}
+
+bool NoneAction::supports_repeated_block() const
+{
+ // None actions are ignored and aren't written to the FW's action list. Hence they can't be part of a repeated block.
+ return false;
+}
+
+Expected<Buffer> NoneAction::serialize_params(const ContextResources &) const
+{
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+Expected<ContextSwitchConfigActionPtr> ActivateConfigChannelAction::create(uint8_t config_stream_index,
+ const vdma::ChannelId &channel_id, const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateConfigChannelAction(config_stream_index,
+ channel_id, host_buffer_info));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateConfigChannelAction::ActivateConfigChannelAction(uint8_t config_stream_index,
+ const vdma::ChannelId &channel_id, const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
+ ContextSwitchConfigAction(Type::ActivateConfigChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CFG_CHANNEL),
+ m_config_stream_index(config_stream_index),
+ m_channel_id(channel_id),
+ m_host_buffer_info(host_buffer_info)
+{}
+
+bool ActivateConfigChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be in a repeated block, for easier debug.
+ return false;
+}
+
+Expected<Buffer> ActivateConfigChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_cfg_channel_t params{};
+ params.config_stream_index = m_config_stream_index;
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.host_buffer_info = m_host_buffer_info;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> DeactivateConfigChannelAction::create(uint8_t config_stream_index,
+ const vdma::ChannelId &channel_id)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DeactivateConfigChannelAction(config_stream_index,
+ channel_id));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+DeactivateConfigChannelAction::DeactivateConfigChannelAction(uint8_t config_stream_index,
+ const vdma::ChannelId &channel_id) :
+ ContextSwitchConfigAction(Type::DeactivateConfigChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_CFG_CHANNEL),
+ m_config_stream_index(config_stream_index),
+ m_channel_id(channel_id)
+{}
+
+bool DeactivateConfigChannelAction::supports_repeated_block() const
+{
+ // Deactivate actions shouldn't be in a repeated block, for easier debug.
+ return false;
+}
+
+Expected<Buffer> DeactivateConfigChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__deactivate_cfg_channel_t params{};
+ params.config_stream_index = m_config_stream_index;
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> WriteDataCcwAction::create(
+ Buffer &&data, uint8_t config_stream_index, size_t total_ccw_burst)
+{
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(total_ccw_burst), HAILO_INVALID_HEF,
+ "Too many ccw burst {} (must fit in uint16)", total_ccw_burst);
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WriteDataCcwAction(
+ std::move(data), config_stream_index, static_cast<uint16_t>(total_ccw_burst)));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WriteDataCcwAction::WriteDataCcwAction(Buffer &&data, uint8_t config_stream_index, uint16_t total_ccw_burst) :
+ ContextSwitchConfigAction(Type::WriteDataCcw),
+ m_data(std::move(data)),
+ m_config_stream_index(config_stream_index),
+ m_total_ccw_burst(total_ccw_burst)
+{}
+
+Expected<std::vector<Buffer>> WriteDataCcwAction::serialize(const ContextResources &) const
+{
+ // WriteDataCcwActions aren't written to the FW's action list.
+ LOGGER__ERROR("Can't serialize WriteDataCcwAction");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+bool WriteDataCcwAction::supports_repeated_block() const
+{
+ // WriteDataCcwActions aren't written to the FW's action list. Hence they can't be part of a repeated block.
+ return false;
+}
+
+Expected<Buffer> WriteDataCcwAction::serialize_params(const ContextResources &) const
+{
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+Expected<ContextSwitchConfigActionPtr> AddCcwBurstAction::create(uint8_t config_stream_index, uint16_t ccw_bursts)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) AddCcwBurstAction(config_stream_index, ccw_bursts));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+AddCcwBurstAction::AddCcwBurstAction(uint8_t config_stream_index, uint16_t ccw_bursts) :
+ ContextSwitchConfigAction(Type::AddCcwBurst, CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CCW_BURSTS),
+ m_config_stream_index(config_stream_index),
+ m_ccw_bursts(ccw_bursts)
+{}
+
+Expected<Buffer> AddCcwBurstAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__fetch_ccw_bursts_action_data_t params{};
+ params.ccw_bursts = m_ccw_bursts;
+ params.config_stream_index = m_config_stream_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+bool AddCcwBurstAction::supports_repeated_block() const
+{
+ return false;
+}
+
+Expected<ContextSwitchConfigActionPtr> FetchCfgChannelDescriptorsAction::create(const vdma::ChannelId &channel_id,
+ size_t desc_count)
+{
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(desc_count), HAILO_INVALID_OPERATION,
+ "On cfg with continuous mode, max descriptors size must fit in uint16_t");
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) FetchCfgChannelDescriptorsAction(channel_id,
+ static_cast<uint16_t>(desc_count)));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+FetchCfgChannelDescriptorsAction::FetchCfgChannelDescriptorsAction(const vdma::ChannelId &channel_id, uint16_t desc_count) :
+ ContextSwitchConfigAction(Type::FetchCfgChannelDescriptors, CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CFG_CHANNEL_DESCRIPTORS),
+ m_channel_id(channel_id),
+ m_desc_count(desc_count)
+{}
+
+bool FetchCfgChannelDescriptorsAction::supports_repeated_block() const
+{
+ return true;
+}
+
+Expected<Buffer> FetchCfgChannelDescriptorsAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__fetch_cfg_channel_descriptors_action_data_t params{};
+ params.descriptors_count = m_desc_count;
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> StartBurstCreditsTaskAction::create()
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) StartBurstCreditsTaskAction());
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+StartBurstCreditsTaskAction::StartBurstCreditsTaskAction() :
+ ContextSwitchConfigAction(Type::StartBurstCreditsTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START)
+{}
+
+bool StartBurstCreditsTaskAction::supports_repeated_block() const
+{
+ // We don't support repeated blocks for this action, since only one is added per group of consecutive
+ // TriggerNewDataFromDataInput actions.
+ return false;
+}
+
+Expected<Buffer> StartBurstCreditsTaskAction::serialize_params(const ContextResources &) const
+{
+ return Buffer::create(0);
+}
+
+Expected<ContextSwitchConfigActionPtr> WaitForNetworkGroupChangeAction::create()
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForNetworkGroupChangeAction());
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitForNetworkGroupChangeAction::WaitForNetworkGroupChangeAction() :
+ ContextSwitchConfigAction(Type::WaitForNetworkGroupChange,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_APPLICATION_CHANGE_INTERRUPT)
+{}
+
+bool WaitForNetworkGroupChangeAction::supports_repeated_block() const
+{
+ // Only one network group change action exists.
+ return false;
+}
+
+Expected<Buffer> WaitForNetworkGroupChangeAction::serialize_params(const ContextResources &) const
+{
+ return Buffer::create(0);
+}
+
+
+Expected<ContextSwitchConfigActionPtr> RepeatedAction::create(
+ std::vector<ContextSwitchConfigActionPtr> &&actions)
+{
+ CHECK_AS_EXPECTED(!actions.empty(), HAILO_INVALID_HEF, "Invalid sub-action count (must be greater than zero)");
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(actions.size()), HAILO_INTERNAL_FAILURE,
+ "Too many repeated actions {}", actions.size());
+ CHECK_AS_EXPECTED(actions[0]->supports_repeated_block(), HAILO_INVALID_HEF,
+ "Invalid repeated sub-action type (Action does not support repeated)");
+ CHECK_AS_EXPECTED(actions[0]->get_action_list_type() != CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT, HAILO_INVALID_HEF,
+ "Invalid repeated sub-action type (can't have sub-action with type CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT)");
+
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) RepeatedAction(std::move(actions)));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+RepeatedAction::RepeatedAction(std::vector<ContextSwitchConfigActionPtr> &&actions) :
+ ContextSwitchConfigAction(Type::AddRepeated, CONTEXT_SWITCH_DEFS__ACTION_TYPE_REPEATED_ACTION),
+ m_actions(std::move(actions)),
+ m_sub_action_type(m_actions[0]->get_action_list_type())
+{}
+
+bool RepeatedAction::supports_repeated_block() const
+{
+ // RepeatedActions can't be part of a repeated block themselves
+ return false;
+}
+
+Expected<Buffer> RepeatedAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__repeated_action_header_t params{};
+ params.sub_action_type = m_sub_action_type;
+ params.last_executed = 0;
+ params.count = static_cast<uint8_t>(m_actions.size());
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<std::vector<Buffer>> RepeatedAction::serialize(const ContextResources &context_resources) const
+{
+ std::vector<Buffer> buffers;
+ buffers.reserve(m_actions.size() + 1); // Contains the repeated header and all of the actions
+
+ auto repeated_header = ContextSwitchConfigAction::serialize(context_resources);
+ CHECK_EXPECTED(repeated_header);
+ CHECK_AS_EXPECTED(repeated_header->size() == 1, HAILO_INTERNAL_FAILURE, "Repeated action header should contain one buffer");
+ buffers.emplace_back(std::move(repeated_header->at(0)));
+
+ for (const auto &action : m_actions) {
+ assert(action->get_action_list_type() == m_sub_action_type);
+ auto action_buffer = action->serialize_params(context_resources);
+ CHECK_EXPECTED(action_buffer);
+ buffers.emplace_back(action_buffer.release());
+ }
+
+ return buffers;
+}
+
+Expected<ContextSwitchConfigActionPtr> DisableLcuAction::create(uint8_t cluster_index, uint8_t lcu_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DisableLcuAction(cluster_index, lcu_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+DisableLcuAction::DisableLcuAction(uint8_t cluster_index, uint8_t lcu_index) :
+ ContextSwitchConfigAction(Type::DisableLcu, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DISABLE_LCU),
+ m_cluster_index(cluster_index),
+ m_lcu_index(lcu_index)
+{}
+
+bool DisableLcuAction::supports_repeated_block() const
+{
+ return true;
+}
+
+Expected<Buffer> DisableLcuAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__disable_lcu_action_data_t params{};
+ params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> WaitForLcuAction::create(uint8_t cluster_index, uint8_t lcu_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForLcuAction(cluster_index, lcu_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitForLcuAction::WaitForLcuAction(uint8_t cluster_index, uint8_t lcu_index) :
+ ContextSwitchConfigAction(Type::WaitForLcu, CONTEXT_SWITCH_DEFS__ACTION_TYPE_LCU_INTERRUPT),
+ m_cluster_index(cluster_index),
+ m_lcu_index(lcu_index)
+{}
+
+bool WaitForLcuAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+Expected<Buffer> WaitForLcuAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__lcu_interrupt_data_t params{};
+ params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> EnableLcuAction::create(uint8_t cluster_index, uint8_t lcu_index,
+ uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count)
+{
+ const auto is_default = (CONTEXT_SWITCH_DEFS__ENABLE_LCU_DEFAULT_KERNEL_ADDRESS == kernel_done_address) &&
+ (CONTEXT_SWITCH_DEFS__ENABLE_LCU_DEFAULT_KERNEL_COUNT == kernel_done_count);
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableLcuAction(cluster_index, lcu_index,
+ network_index, kernel_done_address, kernel_done_count, is_default));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+CONTEXT_SWITCH_DEFS__ACTION_TYPE_t EnableLcuAction::get_enable_lcu_action_type(bool is_default)
+{
+ return is_default ? CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_LCU_DEFAULT :
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_LCU_NON_DEFAULT;
+}
+
+ContextSwitchConfigAction::Type EnableLcuAction::get_enable_lcu_type(bool is_default)
+{
+ return is_default ? Type::EnableLcuDefault : Type::EnableLcuNonDefault;
+}
+
+EnableLcuAction::EnableLcuAction(uint8_t cluster_index, uint8_t lcu_index,
+ uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count, bool is_default) :
+ ContextSwitchConfigAction(get_enable_lcu_type(is_default), get_enable_lcu_action_type(is_default)),
+ m_cluster_index(cluster_index),
+ m_lcu_index(lcu_index),
+ m_network_index(network_index),
+ m_kernel_done_address(kernel_done_address),
+ m_kernel_done_count(kernel_done_count),
+ m_is_default(is_default)
+{}
+
+Expected<Buffer> EnableLcuAction::serialize_params(const ContextResources &) const
+{
+ if (m_is_default) {
+ CONTEXT_SWITCH_DEFS__enable_lcu_action_default_data_t params{};
+ params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
+ params.network_index = m_network_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+ }
+ else {
+ CONTEXT_SWITCH_DEFS__enable_lcu_action_non_default_data_t params{};
+ params.packed_lcu_id = pack_lcu_id(m_cluster_index, m_lcu_index);
+ params.kernel_done_address = m_kernel_done_address;
+ params.kernel_done_count = m_kernel_done_count;
+ params.network_index = m_network_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+ }
+}
+
+bool EnableLcuAction::supports_repeated_block() const
+{
+ return true;
+}
+
+Expected<ContextSwitchConfigActionPtr> EnableSequencerAction::create(uint8_t cluster_index,
+ uint8_t initial_l3_cut, uint16_t initial_l3_offset, uint32_t active_apu, uint32_t active_ia,
+ uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0, uint64_t l2_offset_1)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableSequencerAction(cluster_index, initial_l3_cut,
+ initial_l3_offset, active_apu, active_ia, active_sc, active_l2, l2_offset_0, l2_offset_1));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+EnableSequencerAction::EnableSequencerAction(uint8_t cluster_index, uint8_t initial_l3_cut, uint16_t initial_l3_offset,
+ uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0,
+ uint64_t l2_offset_1) :
+ ContextSwitchConfigAction(Type::TriggerSequencer, CONTEXT_SWITCH_DEFS__ACTION_TYPE_TRIGGER_SEQUENCER),
+ m_cluster_index(cluster_index),
+ m_initial_l3_cut(initial_l3_cut),
+ m_initial_l3_offset(initial_l3_offset),
+ m_active_apu(active_apu),
+ m_active_ia(active_ia),
+ m_active_sc(active_sc),
+ m_active_l2(active_l2),
+ m_l2_offset_0(l2_offset_0),
+ m_l2_offset_1(l2_offset_1)
+{}
+
+bool EnableSequencerAction::supports_repeated_block() const
+{
+ return true;
+}
+
+Expected<Buffer> EnableSequencerAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__trigger_sequencer_action_data_t params{};
+ params.cluster_index = m_cluster_index;
+ params.sequencer_config.initial_l3_cut = m_initial_l3_cut;
+ params.sequencer_config.initial_l3_offset = m_initial_l3_offset;
+ params.sequencer_config.active_apu = m_active_apu;
+ params.sequencer_config.active_ia = m_active_ia;
+ params.sequencer_config.active_sc = m_active_sc;
+ params.sequencer_config.active_l2 = m_active_l2;
+ params.sequencer_config.l2_offset_0 = m_l2_offset_0;
+ params.sequencer_config.l2_offset_1 = m_l2_offset_1;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> WaitForSequencerAction::create(uint8_t cluster_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForSequencerAction(cluster_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitForSequencerAction::WaitForSequencerAction(uint8_t cluster_index) :
+ ContextSwitchConfigAction(Type::WaitForSequencerDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_SEQUENCER_DONE_INTERRUPT),
+ m_cluster_index(cluster_index)
+{}
+
+bool WaitForSequencerAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+Expected<Buffer> WaitForSequencerAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__sequencer_interrupt_data_t params{};
+ params.sequencer_index = m_cluster_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> AllowInputDataflowAction::create(uint8_t stream_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) AllowInputDataflowAction(stream_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+
+AllowInputDataflowAction::AllowInputDataflowAction(uint8_t stream_index) :
+ ContextSwitchConfigAction(Type::TriggerNewDataFromDataInput,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_DATA_FROM_VDMA_CHANNEL),
+ m_stream_index(stream_index)
+{}
+
+bool AllowInputDataflowAction::supports_repeated_block() const
+{
+ // DDR threads are implemented on HailoRT so no FW action is required. Hence they can't be part of a repeated block.
+ if (Type::TriggerNewDataFromDataInputDdr == m_type) {
+ return false;
+ }
+
+ return true;
+}
+
+Expected<Buffer> AllowInputDataflowAction::serialize_params(const ContextResources &context_resources) const
+{
+ const auto edge_layer = context_resources.get_edge_layer_by_stream_index(m_stream_index);
+ CHECK_EXPECTED(edge_layer);
+
+ CONTEXT_SWITCH_DEFS__fetch_data_action_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
+ params.stream_index = m_stream_index;
+ params.network_index = edge_layer->layer_info.network_index;
+ params.host_buffer_type = static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer->buffer_info.buffer_type);
+ params.periph_bytes_per_buffer = edge_layer->layer_info.nn_stream_config.periph_bytes_per_buffer;
+
+ switch (edge_layer->layer_info.type) {
+ case LayerType::BOUNDARY:
+ params.credit_type = CONTEXT_SWITCH_DEFS__CREDIT_IN_BYTES;
+ params.frame_periph_size = edge_layer->layer_info.nn_stream_config.periph_bytes_per_buffer *
+ edge_layer->layer_info.nn_stream_config.periph_buffers_per_frame;
+ break;
+ case LayerType::INTER_CONTEXT:
+ params.credit_type = CONTEXT_SWITCH_DEFS__CREDIT_IN_DESCRIPTORS;
+ params.frame_periph_size = ((edge_layer->buffer_info.bytes_in_pattern - 1) / (edge_layer->buffer_info.desc_page_size)) + 1;
+ break;
+ default:
+ LOGGER__ERROR("Invalid layer type {} for stream {}", static_cast<int>(edge_layer->layer_info.type), m_stream_index);
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> WaitForModuleConfigDoneAction::create(uint8_t module_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForModuleConfigDoneAction(module_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitForModuleConfigDoneAction::WaitForModuleConfigDoneAction(uint8_t module_index) :
+ ContextSwitchConfigAction(Type::WaitForModuleConfigDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_MODULE_CONFIG_DONE_INTERRUPT),
+ m_module_index(module_index)
+{}
+
+bool WaitForModuleConfigDoneAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+Expected<Buffer> WaitForModuleConfigDoneAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__module_config_done_interrupt_data_t params{};
+ params.module_index = m_module_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> DdrPairInfoAction::create(const vdma::ChannelId &h2d_channel_id,
+ const vdma::ChannelId &d2h_channel_id, uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DdrPairInfoAction(
+ h2d_channel_id, d2h_channel_id, network_index, descriptors_per_frame, descs_count));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+DdrPairInfoAction::DdrPairInfoAction(const vdma::ChannelId &h2d_channel_id, const vdma::ChannelId &d2h_channel_id,
+ uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count) :
+ ContextSwitchConfigAction(Type::DdrPairInfo, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ADD_DDR_PAIR_INFO),
+ m_h2d_channel_id(h2d_channel_id),
+ m_d2h_channel_id(d2h_channel_id),
+ m_network_index(network_index),
+ m_descriptors_per_frame(descriptors_per_frame),
+ m_descs_count(descs_count)
+{}
+
+bool DdrPairInfoAction::supports_repeated_block() const
+{
+ return true;
+}
+
+Expected<Buffer> DdrPairInfoAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__add_ddr_pair_info_action_data_t params{};
+ params.h2d_packed_vdma_channel_id = pack_vdma_channel_id(m_h2d_channel_id);
+ params.d2h_packed_vdma_channel_id = pack_vdma_channel_id(m_d2h_channel_id);
+ params.network_index = m_network_index;
+ params.descriptors_per_frame = m_descriptors_per_frame;
+ params.programmed_descriptors_count = m_descs_count;
+ return Buffer::create(reinterpret_cast<uint8_t*>(¶ms), sizeof(params));
+}
+
+// Starts the firmware-side DDR buffering task for the context.
+Expected<ContextSwitchConfigActionPtr> StartDdrBufferingTaskAction::create()
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) StartDdrBufferingTaskAction());
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+StartDdrBufferingTaskAction::StartDdrBufferingTaskAction() :
+ ContextSwitchConfigAction(Type::StartDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START)
+{}
+
+bool StartDdrBufferingTaskAction::supports_repeated_block() const
+{
+ // There should only be one "start ddr buffering task action" per context,
+ // so there's no need to support repeated blocks.
+ return false;
+}
+
+Expected<Buffer> StartDdrBufferingTaskAction::serialize_params(const ContextResources &) const
+{
+ // This action carries no parameters - serialize an empty buffer.
+ return Buffer::create(0);
+}
+
+// Resets the firmware-side DDR buffering task.
+Expected<ContextSwitchConfigActionPtr> ResetDdrBufferingTaskAction::create()
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ResetDdrBufferingTaskAction());
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ResetDdrBufferingTaskAction::ResetDdrBufferingTaskAction() :
+ ContextSwitchConfigAction(Type::ResetDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_RESET)
+{}
+
+bool ResetDdrBufferingTaskAction::supports_repeated_block() const
+{
+ // There should only be one "reset ddr buffering task action" per context at most,
+ // so there's no need to support repeated blocks.
+ return false;
+}
+
+Expected<Buffer> ResetDdrBufferingTaskAction::serialize_params(const ContextResources &) const
+{
+ // This action carries no parameters - serialize an empty buffer.
+ return Buffer::create(0);
+}
+
+// Remaps a vDMA channel to a stream index (optionally marking it as a dummy stream).
+Expected<ContextSwitchConfigActionPtr> ChangeVdmaToStreamMapping::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, bool is_dummy_stream)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ChangeVdmaToStreamMapping(channel_id, stream_index,
+ is_dummy_stream));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ChangeVdmaToStreamMapping::ChangeVdmaToStreamMapping(const vdma::ChannelId &channel_id, uint8_t stream_index,
+ bool is_dummy_stream) :
+ ContextSwitchConfigAction(Type::ChangeVdmaToStreamMapping,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_VDMA_TO_STREAM_MAPPING),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_is_dummy_stream(is_dummy_stream)
+{}
+
+bool ChangeVdmaToStreamMapping::supports_repeated_block() const
+{
+ return true;
+}
+
+// Serializes the mapping into the firmware action-list params struct.
+Expected<Buffer> ChangeVdmaToStreamMapping::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__change_vdma_to_stream_mapping_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.is_dummy_stream = m_is_dummy_stream;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Waits for a transfer-done interrupt on the output channel bound to the given stream.
+Expected<ContextSwitchConfigActionPtr> WaitOutputTransferDoneAction::create(uint8_t stream_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitOutputTransferDoneAction(stream_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitOutputTransferDoneAction::WaitOutputTransferDoneAction(uint8_t stream_index) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitOutputTransferDone, CONTEXT_SWITCH_DEFS__ACTION_TYPE_OUTPUT_CHANNEL_TRANSFER_DONE_INTERRUPT),
+ m_stream_index(stream_index)
+{}
+
+bool WaitOutputTransferDoneAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+Expected<Buffer> WaitOutputTransferDoneAction::serialize_params(const ContextResources &context_resources) const
+{
+ // The vDMA channel is resolved at serialization time from the context's edge layers.
+ const auto edge_layer = context_resources.get_edge_layer_by_stream_index(m_stream_index);
+ CHECK_EXPECTED(edge_layer);
+
+ CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Opens a boundary (host-facing) input channel with its host buffer description.
+Expected<ContextSwitchConfigActionPtr> OpenBoundaryInputChannelAction::create(const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) OpenBoundaryInputChannelAction(channel_id,
+ host_buffer_info));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+OpenBoundaryInputChannelAction::OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::OpenBoundaryInputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL),
+ m_channel_id(channel_id),
+ m_host_buffer_info(host_buffer_info)
+{}
+
+bool OpenBoundaryInputChannelAction::supports_repeated_block() const
+{
+ // Open boundary actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the channel id and host buffer info for the firmware.
+Expected<Buffer> OpenBoundaryInputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.host_buffer_info = m_host_buffer_info;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Opens a boundary (host-facing) output channel with its host buffer description.
+Expected<ContextSwitchConfigActionPtr> OpenBoundaryOutputChannelAction::create(const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) OpenBoundaryOutputChannelAction(channel_id,
+ host_buffer_info));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+OpenBoundaryOutputChannelAction::OpenBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::OpenBoundaryOutputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL),
+ m_channel_id(channel_id),
+ m_host_buffer_info(host_buffer_info)
+{}
+
+bool OpenBoundaryOutputChannelAction::supports_repeated_block() const
+{
+ // Open boundary actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the channel id and host buffer info for the firmware.
+Expected<Buffer> OpenBoundaryOutputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.host_buffer_info = m_host_buffer_info;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// TODO HRT-8705: remove nn_stream_config struct (that this function won't be needed)
+// Copies the control-protocol NN stream configuration, field by field, into the
+// context-switch stream register info layout used by the action-list params.
+static CONTEXT_SWITCH_DEFS__stream_reg_info_t parse_nn_config(const CONTROL_PROTOCOL__nn_stream_config_t &nn_config)
+{
+ CONTEXT_SWITCH_DEFS__stream_reg_info_t reg_info{};
+ reg_info.core_bytes_per_buffer = nn_config.core_bytes_per_buffer;
+ reg_info.core_buffers_per_frame = nn_config.core_buffers_per_frame;
+ reg_info.feature_padding_payload = nn_config.feature_padding_payload;
+ reg_info.buffer_padding_payload = nn_config.buffer_padding_payload;
+ reg_info.buffer_padding = nn_config.buffer_padding;
+ reg_info.periph_bytes_per_buffer = nn_config.periph_bytes_per_buffer;
+ reg_info.periph_buffers_per_frame = nn_config.periph_buffers_per_frame;
+ return reg_info;
+}
+
+// Activates a boundary input channel, including its stream registers, host buffer
+// and initial credit size.
+Expected<ContextSwitchConfigActionPtr> ActivateBoundaryInputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateBoundaryInputChannelAction(channel_id,
+ stream_index, nn_stream_config, host_buffer_info, initial_credit_size));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateBoundaryInputChannelAction::ActivateBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateBoundaryInputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_BOUNDARY_INPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info),
+ m_initial_credit_size(initial_credit_size)
+{}
+
+bool ActivateBoundaryInputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters for the firmware.
+Expected<Buffer> ActivateBoundaryInputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_boundary_input_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ params.initial_credit_size = m_initial_credit_size;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Activates a boundary output channel with its stream registers and host buffer.
+Expected<ContextSwitchConfigActionPtr> ActivateBoundaryOutputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateBoundaryOutputChannelAction(channel_id,
+ stream_index, nn_stream_config, host_buffer_info));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateBoundaryOutputChannelAction::ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateBoundaryOutputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_BOUNDARY_OUTPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info)
+{}
+
+bool ActivateBoundaryOutputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters for the firmware.
+Expected<Buffer> ActivateBoundaryOutputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Activates an inter-context input channel (data flowing between contexts),
+// including stream registers, host buffer and initial credit size.
+Expected<ContextSwitchConfigActionPtr> ActivateInterContextInputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateInterContextInputChannelAction(channel_id,
+ stream_index, nn_stream_config, host_buffer_info, initial_credit_size));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateInterContextInputChannelAction::ActivateInterContextInputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateInterContextInputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_INTER_CONTEXT_INPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info),
+ m_initial_credit_size(initial_credit_size)
+{}
+
+bool ActivateInterContextInputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters for the firmware.
+Expected<Buffer> ActivateInterContextInputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_inter_context_input_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ params.initial_credit_size = m_initial_credit_size;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Activates an inter-context output channel; also carries the network index so
+// the firmware can attribute the channel to its network.
+Expected<ContextSwitchConfigActionPtr> ActivateInterContextOutputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateInterContextOutputChannelAction(channel_id,
+ stream_index, network_index, nn_stream_config, host_buffer_info));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateInterContextOutputChannelAction::ActivateInterContextOutputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateInterContextOutputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_INTER_CONTEXT_OUTPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_network_index(network_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info)
+{}
+
+bool ActivateInterContextOutputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters for the firmware.
+Expected<Buffer> ActivateInterContextOutputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_inter_context_output_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.network_index = m_network_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Activates a DDR-buffer input channel and records the D2H channel it is paired with.
+Expected<ContextSwitchConfigActionPtr> ActivateDdrInputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
+ const vdma::ChannelId &connected_d2h_channel_id)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateDdrInputChannelAction(channel_id,
+ stream_index, nn_stream_config, host_buffer_info, initial_credit_size, connected_d2h_channel_id));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateDdrInputChannelAction::ActivateDdrInputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
+ const vdma::ChannelId &connected_d2h_channel_id) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateDdrInputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_INPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info),
+ m_initial_credit_size(initial_credit_size),
+ m_connected_d2h_channel_id(connected_d2h_channel_id)
+{}
+
+bool ActivateDdrInputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters, including the connected D2H channel.
+Expected<Buffer> ActivateDdrInputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_ddr_buffer_input_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ params.initial_credit_size = m_initial_credit_size;
+ params.connected_d2h_packed_vdma_channel_id = pack_vdma_channel_id(m_connected_d2h_channel_id);
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Activates a DDR-buffer output channel with the number of rows buffered in DDR.
+Expected<ContextSwitchConfigActionPtr> ActivateDdrOutputChannelAction::create(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateDdrOutputChannelAction(channel_id,
+ stream_index, nn_stream_config, host_buffer_info, buffered_rows_count));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ActivateDdrOutputChannelAction::ActivateDdrOutputChannelAction(const vdma::ChannelId &channel_id,
+ uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateDdrOutputChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_DDR_BUFFER_OUTPUT),
+ m_channel_id(channel_id),
+ m_stream_index(stream_index),
+ m_nn_stream_config(nn_stream_config),
+ m_host_buffer_info(host_buffer_info),
+ m_buffered_rows_count(buffered_rows_count)
+{}
+
+bool ActivateDdrOutputChannelAction::supports_repeated_block() const
+{
+ // Activate actions shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the activation parameters for the firmware.
+Expected<Buffer> ActivateDdrOutputChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__activate_ddr_buffer_output_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.stream_index = m_stream_index;
+ params.stream_reg_info = parse_nn_config(m_nn_stream_config);
+ params.host_buffer_info = m_host_buffer_info;
+ params.buffered_rows_count = m_buffered_rows_count;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Validates a vDMA channel's state; created from the edge layer that owns the channel.
+Expected<ContextSwitchConfigActionPtr> ValidateChannelAction::create(const EdgeLayer &edge_layer)
+{
+ const bool is_inter_context = (LayerType::INTER_CONTEXT == edge_layer.layer_info.type);
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ValidateChannelAction(edge_layer.channel_id,
+ edge_layer.layer_info.direction, is_inter_context,
+ static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
+ edge_layer.layer_info.max_shmifo_size));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+ValidateChannelAction::ValidateChannelAction(const vdma::ChannelId &channel_id,
+ hailo_stream_direction_t stream_direction, bool is_inter_context,
+ CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ValidateChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL),
+ m_channel_id(channel_id),
+ m_stream_direction(stream_direction),
+ m_is_inter_context(is_inter_context),
+ m_host_buffer_type(host_buffer_type),
+ m_initial_credit_size(initial_credit_size)
+{}
+
+bool ValidateChannelAction::supports_repeated_block() const
+{
+ // Validate action shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the validation parameters, translating the stream direction enum
+// to the firmware's edge-layer direction encoding.
+Expected<Buffer> ValidateChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__validate_vdma_channel_action_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
+ static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
+ static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
+ params.is_inter_context = m_is_inter_context;
+ params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
+ params.initial_credit_size = m_initial_credit_size;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Deactivates a vDMA channel; created from the edge layer that owns the channel.
+Expected<ContextSwitchConfigActionPtr> DeactivateChannelAction::create(const EdgeLayer &edge_layer)
+{
+ const bool is_inter_context = (LayerType::INTER_CONTEXT == edge_layer.layer_info.type);
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DeactivateChannelAction(edge_layer.channel_id,
+ edge_layer.layer_info.direction, is_inter_context,
+ static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
+ edge_layer.layer_info.max_shmifo_size));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+DeactivateChannelAction::DeactivateChannelAction(const vdma::ChannelId &channel_id,
+ hailo_stream_direction_t stream_direction, bool is_inter_context,
+ CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::DeactivateChannel,
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_VDMA_CHANNEL),
+ m_channel_id(channel_id),
+ m_stream_direction(stream_direction),
+ m_is_inter_context(is_inter_context),
+ m_host_buffer_type(host_buffer_type),
+ m_initial_credit_size(initial_credit_size)
+{}
+
+bool DeactivateChannelAction::supports_repeated_block() const
+{
+ // Deactivate action shouldn't be repeated (for easier debugging).
+ return false;
+}
+
+// Serializes the deactivation parameters, translating the stream direction enum
+// to the firmware's edge-layer direction encoding.
+Expected<Buffer> DeactivateChannelAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+ params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
+ static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
+ static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
+ params.is_inter_context = m_is_inter_context;
+ params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
+ params.initial_credit_size = m_initial_credit_size;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Waits until the DMA of the channel bound to the given stream index is idle.
+Expected<ContextSwitchConfigActionPtr> WaitDmaIdleAction::create(uint8_t stream_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitDmaIdleAction(stream_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitDmaIdleAction::WaitDmaIdleAction(uint8_t stream_index) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitDmaIdle, CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_DMA_IDLE_ACTION),
+ m_stream_index(stream_index)
+{}
+
+bool WaitDmaIdleAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+Expected<Buffer> WaitDmaIdleAction::serialize_params(const ContextResources &context_resources) const
+{
+ // The channel and layer type are resolved at serialization time from the context's edge layers.
+ const auto edge_layer = context_resources.get_edge_layer_by_stream_index(m_stream_index);
+ CHECK_EXPECTED(edge_layer);
+
+ CONTEXT_SWITCH_DEFS__wait_dma_idle_data_t params{};
+ params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
+ params.is_inter_context = static_cast<uint8_t>(LayerType::INTER_CONTEXT == edge_layer->layer_info.type);
+ params.stream_index = m_stream_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Waits for the NMS unit described by the aggregator/output-buffer indices to go idle.
+Expected<ContextSwitchConfigActionPtr> WaitNmsIdleAction::create(uint8_t aggregator_index,
+ uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index, uint8_t pred_cluster_ob_interface,
+ uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitNmsIdleAction(aggregator_index,
+ pred_cluster_ob_index, pred_cluster_ob_cluster_index, pred_cluster_ob_interface, succ_prepost_ob_index,
+ succ_prepost_ob_interface));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+WaitNmsIdleAction::WaitNmsIdleAction(uint8_t aggregator_index, uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index,
+ uint8_t pred_cluster_ob_interface, uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::WaitNmsIdle, CONTEXT_SWITCH_DEFS__ACTION_TYPE_WAIT_FOR_NMS),
+ m_aggregator_index(aggregator_index),
+ m_pred_cluster_ob_index(pred_cluster_ob_index),
+ m_pred_cluster_ob_cluster_index(pred_cluster_ob_cluster_index),
+ m_pred_cluster_ob_interface(pred_cluster_ob_interface),
+ m_succ_prepost_ob_index(succ_prepost_ob_index),
+ m_succ_prepost_ob_interface(succ_prepost_ob_interface)
+{}
+
+bool WaitNmsIdleAction::supports_repeated_block() const
+{
+ // Wait actions shouldn't be repeated (for easier debugging)
+ return false;
+}
+
+// Serializes the NMS wait parameters for the firmware.
+Expected<Buffer> WaitNmsIdleAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__wait_nms_data_t params{};
+ params.aggregator_index = m_aggregator_index;
+ params.pred_cluster_ob_index = m_pred_cluster_ob_index;
+ params.pred_cluster_ob_cluster_index = m_pred_cluster_ob_cluster_index;
+ params.pred_cluster_ob_interface = m_pred_cluster_ob_interface;
+ params.succ_prepost_ob_index = m_succ_prepost_ob_index;
+ params.succ_prepost_ob_interface = m_succ_prepost_ob_interface;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+// Enables the given NMS unit for the given network.
+Expected<ContextSwitchConfigActionPtr> EnableNmsAction::create(uint8_t nms_unit_index, uint8_t network_index)
+{
+ auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableNmsAction(nms_unit_index, network_index));
+ CHECK_AS_EXPECTED((nullptr != result), HAILO_OUT_OF_HOST_MEMORY);
+ return result;
+}
+
+EnableNmsAction::EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index) :
+ ContextSwitchConfigAction(ContextSwitchConfigAction::Type::EnableNms, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS),
+ m_nms_unit_index(nms_unit_index),
+ m_network_index(network_index)
+{}
+
+// Serializes the NMS unit and network indices for the firmware.
+Expected<Buffer> EnableNmsAction::serialize_params(const ContextResources &) const
+{
+ CONTEXT_SWITCH_DEFS__enable_nms_action_t params{};
+ params.nms_unit_index = m_nms_unit_index;
+ params.network_index = m_network_index;
+ return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+bool EnableNmsAction::supports_repeated_block() const
+{
+ return true;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file context_switch_actions.hpp
+ * @brief Contains the classes that represent context switch actions (actions parsed from HEFs
+ * and actions sent to the firmware).
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_
+#define _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/buffer.hpp"
+
+#include "vdma/channel/channel_id.hpp"
+#include "hef/layer_info.hpp"
+
+#include "device_common/control_protocol.hpp"
+#include "context_switch_defs.h"
+
+
+namespace hailort
+{
+
+
+class ContextResources;
+struct EdgeLayer;
+
+class ContextSwitchConfigAction;
+using ContextSwitchConfigActionPtr = std::shared_ptr<ContextSwitchConfigAction>;
+// Abstract base class for all context switch actions. Each concrete action knows
+// how to serialize itself into the binary action-list format sent to the firmware.
+class ContextSwitchConfigAction
+{
+public:
+ // Host-side discriminator for the action; the firmware-facing type is the
+ // separate CONTEXT_SWITCH_DEFS__ACTION_TYPE_t stored alongside it.
+ enum class Type
+ {
+ None,
+ ActivateConfigChannel,
+ DeactivateConfigChannel,
+ WriteDataCcw,
+ AddCcwBurst,
+ FetchCfgChannelDescriptors,
+ TriggerSequencer,
+ WaitForSequencerDone,
+ TriggerNewDataFromDataInput,
+ TriggerNewDataFromDataInputDdr,
+ EnableLcuNonDefault,
+ EnableLcuDefault,
+ DisableLcu,
+ WaitForLcu,
+ WaitForModuleConfigDone,
+ DdrPairInfo,
+ StartDdrBufferingTask,
+ ResetDdrBufferingTask,
+ AddRepeated,
+ StartBurstCreditsTask,
+ WaitForNetworkGroupChange,
+ ChangeVdmaToStreamMapping,
+ WaitOutputTransferDone,
+ OpenBoundaryInputChannel,
+ OpenBoundaryOutputChannel,
+ ActivateBoundaryInputChannel,
+ ActivateBoundaryOutputChannel,
+ ActivateInterContextInputChannel,
+ ActivateInterContextOutputChannel,
+ ActivateDdrInputChannel,
+ ActivateDdrOutputChannel,
+ ValidateChannel,
+ DeactivateChannel,
+ WaitDmaIdle,
+ WaitNmsIdle,
+ EnableNms,
+ };
+
+ // Movable but not copyable/assignable; managed through ContextSwitchConfigActionPtr.
+ ContextSwitchConfigAction(ContextSwitchConfigAction &&) = default;
+ ContextSwitchConfigAction(const ContextSwitchConfigAction &) = delete;
+ ContextSwitchConfigAction &operator=(ContextSwitchConfigAction &&) = delete;
+ ContextSwitchConfigAction &operator=(const ContextSwitchConfigAction &) = delete;
+ virtual ~ContextSwitchConfigAction() = default;
+
+ // Serialize the action a vector of buffers - each buffer is a chunk that must be sent continuously to the firmware
+ // (For example each chunk can be sub action of RepeatedAction).
+ virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources) const;
+
+ // Serializes the fixed action header; serialize_params emits the action-specific payload.
+ Expected<Buffer> serialize_header() const;
+ virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const = 0;
+
+ // Whether this action may appear inside a repeated-actions block in the action list.
+ virtual bool supports_repeated_block() const = 0;
+ Type get_type() const;
+ CONTEXT_SWITCH_DEFS__ACTION_TYPE_t get_action_list_type() const;
+
+protected:
+ ContextSwitchConfigAction(Type type);
+ ContextSwitchConfigAction(Type type, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_list_type);
+
+ const Type m_type;
+ const CONTEXT_SWITCH_DEFS__ACTION_TYPE_t m_action_list_type;
+};
+
+// A no-op action; its serialize() emits nothing for the firmware action list.
+class NoneAction : public ContextSwitchConfigAction
+{
+public:
+ static Expected<ContextSwitchConfigActionPtr> create();
+ NoneAction(NoneAction &&) = default;
+ NoneAction(const NoneAction &) = delete;
+ NoneAction &operator=(NoneAction &&) = delete;
+ NoneAction &operator=(const NoneAction &) = delete;
+ virtual ~NoneAction() = default;
+
+ virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources) const override;
+ virtual bool supports_repeated_block() const override;
+ virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+ NoneAction();
+};
+
+// Activates a configuration channel used to stream configuration data (CCWs) to the device.
+class ActivateConfigChannelAction : public ContextSwitchConfigAction
+{
+public:
+ static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+ virtual bool supports_repeated_block() const override;
+ virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+ ActivateConfigChannelAction(uint8_t config_stream_index, const vdma::ChannelId &channel_id,
+ const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+ const uint8_t m_config_stream_index;
+ const vdma::ChannelId m_channel_id;
+ const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+};
+
+// Deactivates a previously-activated configuration channel.
+class DeactivateConfigChannelAction : public ContextSwitchConfigAction
+{
+public:
+ static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, const vdma::ChannelId &channel_id);
+
+ virtual bool supports_repeated_block() const override;
+ virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+ DeactivateConfigChannelAction(uint8_t config_stream_index, const vdma::ChannelId &channel_id);
+
+ const uint8_t m_config_stream_index;
+ const vdma::ChannelId m_channel_id;
+};
+
+// Holds a chunk of CCW (configuration) data to be written over a config stream.
+class WriteDataCcwAction : public ContextSwitchConfigAction
+{
+public:
+ // NOTE(review): create() accepts total_ccw_burst as size_t but the stored field
+ // is uint16_t - the narrowing is presumably validated inside create(); confirm.
+ static Expected<ContextSwitchConfigActionPtr> create(Buffer &&data, uint8_t config_stream_index,
+ size_t total_ccw_burst);
+ WriteDataCcwAction(WriteDataCcwAction &&) = default;
+ WriteDataCcwAction(const WriteDataCcwAction &) = delete;
+ WriteDataCcwAction &operator=(WriteDataCcwAction &&) = delete;
+ WriteDataCcwAction &operator=(const WriteDataCcwAction &) = delete;
+ virtual ~WriteDataCcwAction() = default;
+
+ virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources) const override;
+ virtual bool supports_repeated_block() const override;
+ virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+ // Read-only view of the CCW payload.
+ const MemoryView data() const { return MemoryView::create_const(m_data.data(), m_data.size()); }
+ uint8_t config_stream_index() const { return m_config_stream_index; }
+ uint16_t total_ccw_burst() const { return m_total_ccw_burst; }
+
+private:
+ WriteDataCcwAction(Buffer &&data, uint8_t config_stream_index,
+ uint16_t total_ccw_burst);
+
+ Buffer m_data;
+ const uint8_t m_config_stream_index;
+ const uint16_t m_total_ccw_burst;
+};
+
+// Action adding a number of CCW bursts to a given config stream.
+class AddCcwBurstAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t config_stream_index, uint16_t ccw_bursts);
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    AddCcwBurstAction(uint8_t config_stream_index, uint16_t ccw_bursts);
+
+    const uint8_t m_config_stream_index;
+    const uint16_t m_ccw_bursts;
+};
+
+// Action requesting a fetch of descriptors on a config channel.
+class FetchCfgChannelDescriptorsAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, size_t desc_count);
+
+    FetchCfgChannelDescriptorsAction(FetchCfgChannelDescriptorsAction &&) = default;
+    FetchCfgChannelDescriptorsAction(const FetchCfgChannelDescriptorsAction &) = delete;
+    FetchCfgChannelDescriptorsAction &operator=(FetchCfgChannelDescriptorsAction &&) = delete;
+    FetchCfgChannelDescriptorsAction &operator=(const FetchCfgChannelDescriptorsAction &) = delete;
+    virtual ~FetchCfgChannelDescriptorsAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    // NOTE(review): create() accepts size_t but stores uint16_t - presumably
+    // range-checked in create(); confirm.
+    FetchCfgChannelDescriptorsAction(const vdma::ChannelId &channel_id, uint16_t desc_count);
+
+    const vdma::ChannelId m_channel_id;
+    const uint16_t m_desc_count;
+};
+
+// Parameter-less action that starts the burst-credits task.
+class StartBurstCreditsTaskAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create();
+
+    StartBurstCreditsTaskAction(StartBurstCreditsTaskAction &&) = default;
+    StartBurstCreditsTaskAction(const StartBurstCreditsTaskAction &) = delete;
+    StartBurstCreditsTaskAction &operator=(StartBurstCreditsTaskAction &&) = delete;
+    StartBurstCreditsTaskAction &operator=(const StartBurstCreditsTaskAction &) = delete;
+    virtual ~StartBurstCreditsTaskAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    StartBurstCreditsTaskAction();
+};
+
+// Parameter-less action that waits for a network-group change.
+class WaitForNetworkGroupChangeAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create();
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    WaitForNetworkGroupChangeAction();
+};
+
+// Wraps a run of consecutive actions so they can be emitted as one repeated
+// block; overrides serialize() to produce the combined serialization.
+// NOTE(review): m_sub_action_type presumably reflects the shared type of the
+// wrapped actions - confirm in the .cpp.
+class RepeatedAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(std::vector<ContextSwitchConfigActionPtr> &&actions);
+    RepeatedAction(RepeatedAction &&) = default;
+    RepeatedAction(const RepeatedAction &) = delete;
+    RepeatedAction &operator=(RepeatedAction &&) = delete;
+    RepeatedAction &operator=(const RepeatedAction &) = delete;
+    virtual ~RepeatedAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+    virtual Expected<std::vector<Buffer>> serialize(const ContextResources &context_resources) const override;
+
+private:
+    RepeatedAction(std::vector<ContextSwitchConfigActionPtr> &&actions);
+
+    const std::vector<ContextSwitchConfigActionPtr> m_actions;
+    const CONTEXT_SWITCH_DEFS__ACTION_TYPE_t m_sub_action_type;
+};
+
+// Action disabling a specific LCU, addressed by cluster and LCU index.
+class DisableLcuAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index);
+    DisableLcuAction(DisableLcuAction &&) = default;
+    DisableLcuAction(const DisableLcuAction &) = delete;
+    DisableLcuAction &operator=(DisableLcuAction &&) = delete;
+    DisableLcuAction &operator=(const DisableLcuAction &) = delete;
+    virtual ~DisableLcuAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    DisableLcuAction(uint8_t cluster_index, uint8_t lcu_index);
+
+    const uint8_t m_cluster_index;
+    const uint8_t m_lcu_index;
+};
+
+
+// Action waiting on a specific LCU, addressed by cluster and LCU index.
+class WaitForLcuAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index);
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    WaitForLcuAction(uint8_t cluster_index, uint8_t lcu_index);
+
+    // NOTE(review): members are non-const here, unlike sibling actions - confirm intentional.
+    uint8_t m_cluster_index;
+    uint8_t m_lcu_index;
+};
+
+// Action enabling an LCU with its kernel-done configuration; the is_default
+// flag selects between two action/type variants via the static helpers below.
+class EnableLcuAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t lcu_index,
+        uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count);
+    EnableLcuAction(EnableLcuAction &&) = default;
+    EnableLcuAction(const EnableLcuAction &) = delete;
+    EnableLcuAction &operator=(EnableLcuAction &&) = delete;
+    EnableLcuAction &operator=(const EnableLcuAction &) = delete;
+    virtual ~EnableLcuAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    // Map is_default to the firmware action type / the action's Type enum.
+    static CONTEXT_SWITCH_DEFS__ACTION_TYPE_t get_enable_lcu_action_type(bool is_default);
+    static Type get_enable_lcu_type(bool is_default);
+
+    EnableLcuAction(uint8_t cluster_index, uint8_t lcu_index,
+        uint8_t network_index, uint16_t kernel_done_address, uint32_t kernel_done_count, bool is_default);
+
+    const uint8_t m_cluster_index;
+    const uint8_t m_lcu_index;
+    const uint8_t m_network_index;
+    const uint16_t m_kernel_done_address;
+    const uint32_t m_kernel_done_count;
+    const bool m_is_default;
+};
+
+// Action enabling a cluster's sequencer with its full activation configuration
+// (L3 cut/offset, active APU/IA/SC/L2 masks and L2 offsets).
+class EnableSequencerAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index, uint8_t initial_l3_cut,
+        uint16_t initial_l3_offset, uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2,
+        uint64_t l2_offset_0, uint64_t l2_offset_1);
+    EnableSequencerAction(EnableSequencerAction &&) = default;
+    EnableSequencerAction(const EnableSequencerAction &) = delete;
+    EnableSequencerAction &operator=(EnableSequencerAction &&) = delete;
+    EnableSequencerAction &operator=(const EnableSequencerAction &) = delete;
+    virtual ~EnableSequencerAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    EnableSequencerAction(uint8_t cluster_index, uint8_t initial_l3_cut, uint16_t initial_l3_offset,
+        uint32_t active_apu, uint32_t active_ia, uint64_t active_sc, uint64_t active_l2, uint64_t l2_offset_0,
+        uint64_t l2_offset_1);
+
+    const uint8_t m_cluster_index;
+    const uint8_t m_initial_l3_cut;
+    const uint16_t m_initial_l3_offset;
+    const uint32_t m_active_apu;
+    const uint32_t m_active_ia;
+    const uint64_t m_active_sc;
+    const uint64_t m_active_l2;
+    const uint64_t m_l2_offset_0;
+    const uint64_t m_l2_offset_1;
+};
+
+// Action waiting for a cluster's sequencer.
+class WaitForSequencerAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t cluster_index);
+    WaitForSequencerAction(WaitForSequencerAction &&) = default;
+    WaitForSequencerAction(const WaitForSequencerAction &) = delete;
+    WaitForSequencerAction &operator=(WaitForSequencerAction &&) = delete;
+    WaitForSequencerAction &operator=(const WaitForSequencerAction &) = delete;
+    virtual ~WaitForSequencerAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    WaitForSequencerAction(uint8_t cluster_index);
+
+    const uint8_t m_cluster_index;
+};
+
+// Action allowing dataflow on an input stream, addressed by stream index.
+class AllowInputDataflowAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
+    AllowInputDataflowAction(AllowInputDataflowAction &&) = default;
+    AllowInputDataflowAction(const AllowInputDataflowAction &) = delete;
+    AllowInputDataflowAction &operator=(AllowInputDataflowAction &&) = delete;
+    AllowInputDataflowAction &operator=(const AllowInputDataflowAction &) = delete;
+    virtual ~AllowInputDataflowAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    explicit AllowInputDataflowAction(uint8_t stream_index);
+
+    const uint8_t m_stream_index;
+};
+
+// Action waiting until a module's configuration is done, addressed by module index.
+class WaitForModuleConfigDoneAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t module_index);
+    WaitForModuleConfigDoneAction(WaitForModuleConfigDoneAction &&) = default;
+    WaitForModuleConfigDoneAction(const WaitForModuleConfigDoneAction &) = delete;
+    WaitForModuleConfigDoneAction &operator=(WaitForModuleConfigDoneAction &&) = delete;
+    WaitForModuleConfigDoneAction &operator=(const WaitForModuleConfigDoneAction &) = delete;
+    virtual ~WaitForModuleConfigDoneAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    WaitForModuleConfigDoneAction(uint8_t module_index);
+
+    const uint8_t m_module_index;
+};
+
+// Action describing a DDR channel pair (H2D + D2H) with its descriptor counts.
+class DdrPairInfoAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &h2d_channel_id,
+        const vdma::ChannelId &d2h_channel_id, uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count);
+    DdrPairInfoAction(DdrPairInfoAction &&) = default;
+    DdrPairInfoAction(const DdrPairInfoAction &) = delete;
+    DdrPairInfoAction &operator=(DdrPairInfoAction &&) = delete;
+    DdrPairInfoAction &operator=(const DdrPairInfoAction &) = delete;
+    virtual ~DdrPairInfoAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    DdrPairInfoAction(const vdma::ChannelId &h2d_channel_id, const vdma::ChannelId &d2h_channel_id,
+        uint8_t network_index, uint32_t descriptors_per_frame, uint16_t descs_count);
+
+    const vdma::ChannelId m_h2d_channel_id;
+    const vdma::ChannelId m_d2h_channel_id;
+    const uint8_t m_network_index;
+    const uint32_t m_descriptors_per_frame;
+    const uint16_t m_descs_count;
+};
+
+// Parameter-less action that starts the DDR buffering task.
+class StartDdrBufferingTaskAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create();
+    StartDdrBufferingTaskAction(StartDdrBufferingTaskAction &&) = default;
+    StartDdrBufferingTaskAction(const StartDdrBufferingTaskAction &) = delete;
+    StartDdrBufferingTaskAction &operator=(StartDdrBufferingTaskAction &&) = delete;
+    StartDdrBufferingTaskAction &operator=(const StartDdrBufferingTaskAction &) = delete;
+    virtual ~StartDdrBufferingTaskAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    StartDdrBufferingTaskAction();
+};
+
+// Parameter-less action that resets the DDR buffering task.
+class ResetDdrBufferingTaskAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create();
+    ResetDdrBufferingTaskAction(ResetDdrBufferingTaskAction &&) = default;
+    ResetDdrBufferingTaskAction(const ResetDdrBufferingTaskAction &) = delete;
+    ResetDdrBufferingTaskAction &operator=(ResetDdrBufferingTaskAction &&) = delete;
+    ResetDdrBufferingTaskAction &operator=(const ResetDdrBufferingTaskAction &) = delete;
+    virtual ~ResetDdrBufferingTaskAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+private:
+    ResetDdrBufferingTaskAction();
+};
+
+// Action remapping a vDMA channel to a stream; is_dummy_stream marks a
+// placeholder mapping (semantics defined in the .cpp serialization).
+class ChangeVdmaToStreamMapping : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, uint8_t stream_index,
+        bool is_dummy_stream);
+    ChangeVdmaToStreamMapping(ChangeVdmaToStreamMapping &&) = default;
+    ChangeVdmaToStreamMapping(const ChangeVdmaToStreamMapping &) = delete;
+    ChangeVdmaToStreamMapping &operator=(ChangeVdmaToStreamMapping &&) = delete;
+    ChangeVdmaToStreamMapping &operator=(const ChangeVdmaToStreamMapping &) = delete;
+    virtual ~ChangeVdmaToStreamMapping() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ChangeVdmaToStreamMapping(const vdma::ChannelId &channel_id, uint8_t stream_index, bool is_dummy_stream);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const bool m_is_dummy_stream;
+};
+
+// Action waiting for an output stream's transfer to complete.
+class WaitOutputTransferDoneAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    explicit WaitOutputTransferDoneAction(uint8_t stream_index);
+
+    uint8_t m_stream_index;
+};
+
+// Action opening a boundary input channel with its host buffer description.
+class OpenBoundaryInputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    const vdma::ChannelId m_channel_id;
+    // NOTE(review): non-const, unlike most sibling actions - confirm intentional.
+    CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+};
+
+// Action opening a boundary output channel with its host buffer description.
+class OpenBoundaryOutputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    OpenBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    const vdma::ChannelId m_channel_id;
+    CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+};
+
+// Action activating a boundary input channel: channel id, stream index,
+// stream config, host buffer info and the channel's initial credit size.
+class ActivateBoundaryInputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info,
+        uint32_t initial_credit_size);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+    const uint32_t m_initial_credit_size;
+};
+
+// Action activating a boundary output channel (no credit size, unlike the input variant).
+class ActivateBoundaryOutputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+};
+
+// Action activating an inter-context input channel (mirrors the boundary
+// input variant, including the initial credit size).
+class ActivateInterContextInputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateInterContextInputChannelAction(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info,
+        uint32_t initial_credit_size);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+    const uint32_t m_initial_credit_size;
+};
+
+// Action activating an inter-context output channel; additionally carries the
+// network index (absent from the boundary output variant).
+class ActivateInterContextOutputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id, uint8_t stream_index,
+        uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateInterContextOutputChannelAction(const vdma::ChannelId &channel_id, uint8_t stream_index,
+        uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const uint8_t m_network_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+};
+
+// Action activating a DDR input channel; also records the D2H channel it is
+// paired with (m_connected_d2h_channel_id).
+class ActivateDdrInputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
+        const vdma::ChannelId &connected_d2h_channel_id);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateDdrInputChannelAction(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t initial_credit_size,
+        const vdma::ChannelId &connected_d2h_channel_id);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+    const uint32_t m_initial_credit_size;
+    const vdma::ChannelId m_connected_d2h_channel_id;
+};
+
+// Action activating a DDR output channel with its buffered rows count.
+class ActivateDdrOutputChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ActivateDdrOutputChannelAction(const vdma::ChannelId &channel_id,
+        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info, uint32_t buffered_rows_count);
+
+    const vdma::ChannelId m_channel_id;
+    const uint8_t m_stream_index;
+    const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+    const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
+    const uint32_t m_buffered_rows_count;
+};
+
+// Action validating a channel; built from an EdgeLayer, storing the channel
+// identity plus the direction/buffer attributes needed for validation.
+class ValidateChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ValidateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
+        bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
+
+    const vdma::ChannelId m_channel_id;
+    const hailo_stream_direction_t m_stream_direction;
+    const bool m_is_inter_context;
+    const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
+    const uint32_t m_initial_credit_size;
+};
+
+// Action deactivating a channel; built from an EdgeLayer with the same
+// attribute set as ValidateChannelAction.
+class DeactivateChannelAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    DeactivateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
+        bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
+
+    const vdma::ChannelId m_channel_id;
+    const hailo_stream_direction_t m_stream_direction;
+    const bool m_is_inter_context;
+    const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
+    const uint32_t m_initial_credit_size;
+};
+
+// Action waiting for a stream's DMA to become idle.
+class WaitDmaIdleAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t stream_index);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    explicit WaitDmaIdleAction(uint8_t stream_index);
+
+    uint8_t m_stream_index;
+};
+
+// Action waiting for the NMS unit to become idle, addressed by aggregator and
+// predecessor/successor output-buffer indices and interfaces.
+class WaitNmsIdleAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t aggregator_index,
+        uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index, uint8_t pred_cluster_ob_interface,
+        uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    WaitNmsIdleAction(uint8_t aggregator_index, uint8_t pred_cluster_ob_index, uint8_t pred_cluster_ob_cluster_index,
+        uint8_t pred_cluster_ob_interface, uint8_t succ_prepost_ob_index, uint8_t succ_prepost_ob_interface);
+
+    uint8_t m_aggregator_index;
+    uint8_t m_pred_cluster_ob_index;
+    uint8_t m_pred_cluster_ob_cluster_index;
+    uint8_t m_pred_cluster_ob_interface;
+    uint8_t m_succ_prepost_ob_index;
+    uint8_t m_succ_prepost_ob_interface;
+};
+
+// Action enabling an NMS unit for a given network.
+class EnableNmsAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(uint8_t nms_unit_index, uint8_t network_index);
+    EnableNmsAction(EnableNmsAction &&) = default;
+    EnableNmsAction(const EnableNmsAction &) = delete;
+    EnableNmsAction &operator=(EnableNmsAction &&) = delete;
+    EnableNmsAction &operator=(const EnableNmsAction &) = delete;
+    virtual ~EnableNmsAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index);
+
+    const uint8_t m_nms_unit_index;
+    const uint8_t m_network_index;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_ACTIONS_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file core_op_metadata.cpp
+ * @brief Contains all relevant information about a core-op from the hef.
+ **/
+
+#include "core_op_metadata.hpp"
+#include <numeric>
+
+namespace hailort
+{
+
+// Recursively collect the names of all non-mux (demuxed) layers reachable
+// through the predecessor tree rooted at 'info'.
+static void get_demuxes_names_impl(const LayerInfo &info, std::vector<std::string> &res)
+{
+    if (!info.is_mux) {
+        // Leaf layer - its own name is the demux name.
+        res.push_back(info.name);
+        return;
+    }
+    // Mux layer - descend into every predecessor.
+    for (auto &pred : info.predecessor) {
+        get_demuxes_names_impl(pred, res);
+    }
+}
+
+// Convenience wrapper returning the demux names of 'info' as a flat vector.
+static std::vector<std::string> get_demuxes_names(const LayerInfo &info)
+{
+    std::vector<std::string> names;
+    get_demuxes_names_impl(info, names);
+    return names;
+}
+
+// Check whether an edge named 'edge_name' appears under the (possibly nested)
+// mux tree rooted at 'info'. For a non-mux layer this degenerates to a direct
+// name comparison against the layer itself.
+// NOTE: the original loop tested the loop-invariant 'info.is_mux' (always true
+// past the guard) where 'pred.is_mux' was presumably intended, leaving a dead
+// else-branch. Recursing into a non-mux predecessor performs exactly that name
+// comparison, so the redundant test is removed with no behavior change.
+static bool is_edge_under_mux(const LayerInfo &info, const std::string &edge_name)
+{
+    if (!info.is_mux) {
+        return edge_name == info.name;
+    }
+    for (const auto &pred : info.predecessor) {
+        if (is_edge_under_mux(pred, edge_name)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Takes ownership of the context's action list and its per-config-stream
+// buffer-sizes map.
+ContextMetadata::ContextMetadata(std::vector<ContextSwitchConfigActionPtr> &&actions,
+    ConfigBufferInfoMap&& config_buffers_info) :
+    m_actions(std::move(actions)),
+    m_config_buffers_info(std::move(config_buffers_info))
+{}
+
+// Accessor for the config-buffers info map.
+const ConfigBufferInfoMap &ContextMetadata::config_buffers_info() const
+{
+    return m_config_buffers_info;
+}
+
+// All context-switch actions of this context, in their stored order.
+const std::vector<ContextSwitchConfigActionPtr> &ContextMetadata::get_actions() const
+{
+    return m_actions;
+}
+
+// Return (shared pointers to) the subset of actions whose type is contained
+// in 'action_types', preserving the original order.
+std::vector<ContextSwitchConfigActionPtr> ContextMetadata::get_actions_of_type(
+    const std::set<ContextSwitchConfigAction::Type> &action_types) const
+{
+    std::vector<ContextSwitchConfigActionPtr> matching;
+    for (const auto &action : m_actions) {
+        if (0 != action_types.count(action->get_type())) {
+            matching.emplace_back(action);
+        }
+    }
+    return matching;
+}
+
+// Record a boundary layer, routed by stream direction (H2D => input list).
+void ContextMetadata::add_boundary_layer(const LayerInfo &layer_info)
+{
+    auto &target_list = (HAILO_H2D_STREAM == layer_info.direction) ?
+        m_boundary_input_layers : m_boundary_output_layers;
+    target_list.push_back(layer_info);
+}
+
+// Record an inter-context layer, routed by stream direction (H2D => input list).
+void ContextMetadata::add_inter_context_layer(const LayerInfo &layer_info)
+{
+    auto &target_list = (HAILO_H2D_STREAM == layer_info.direction) ?
+        m_inter_context_input_layers : m_inter_context_output_layers;
+    target_list.push_back(layer_info);
+}
+
+// Record a DDR layer, routed by stream direction (H2D => input list).
+void ContextMetadata::add_ddr_layer(const LayerInfo &layer_info)
+{
+    auto &target_list = (HAILO_H2D_STREAM == layer_info.direction) ?
+        m_ddr_input_layers : m_ddr_output_layers;
+    target_list.push_back(layer_info);
+}
+
+// Trivial accessors for this context's edge-layer lists.
+const std::vector<LayerInfo> &ContextMetadata::get_boundary_input_layers() const
+{
+    return m_boundary_input_layers;
+}
+
+const std::vector<LayerInfo> &ContextMetadata::get_boundary_output_layers() const
+{
+    return m_boundary_output_layers;
+}
+
+const std::vector<LayerInfo> &ContextMetadata::get_inter_context_input_layers() const
+{
+    return m_inter_context_input_layers;
+}
+
+const std::vector<LayerInfo> &ContextMetadata::get_inter_context_output_layers() const
+{
+    return m_inter_context_output_layers;
+}
+
+const std::vector<LayerInfo> &ContextMetadata::get_ddr_input_layers() const
+{
+    return m_ddr_input_layers;
+}
+
+const std::vector<LayerInfo> &ContextMetadata::get_ddr_output_layers() const
+{
+    return m_ddr_output_layers;
+}
+
+// Sum the transfer sizes of the given layers; propagates the first failure
+// from LayerInfoUtils::get_transfer_size().
+Expected<size_t> ContextMetadata::get_layers_transfer_size(const std::vector<LayerInfo> &layer_infos) const
+{
+    size_t accumulated_size = 0;
+    for (const auto &layer : layer_infos) {
+        auto layer_size = LayerInfoUtils::get_transfer_size(layer);
+        CHECK_EXPECTED(layer_size);
+        accumulated_size += layer_size.release();
+    }
+    return accumulated_size;
+}
+
+// Total bytes transferred for this context: all config buffers plus every edge
+// layer (boundary, DDR and inter-context, both directions). Propagates the
+// first failure from get_layers_transfer_size().
+Expected<size_t> ContextMetadata::get_context_transfer_size() const
+{
+    size_t total_transfer_size = 0;
+
+    // Calc config buffers.
+    // The init value must be size_t - a plain '0' (int) would make
+    // std::accumulate sum in int, risking truncation/overflow for large configs.
+    for (const auto &config_buffer_sizes : m_config_buffers_info) {
+        total_transfer_size += std::accumulate(config_buffer_sizes.second.begin(),
+            config_buffer_sizes.second.end(), size_t{0});
+    }
+
+    // Calc all edge layers
+    auto boundary_input_transfer_size = get_layers_transfer_size(m_boundary_input_layers);
+    CHECK_EXPECTED(boundary_input_transfer_size);
+    auto boundary_output_transfer_size = get_layers_transfer_size(m_boundary_output_layers);
+    CHECK_EXPECTED(boundary_output_transfer_size);
+    auto ddr_input_transfer_size = get_layers_transfer_size(m_ddr_input_layers);
+    CHECK_EXPECTED(ddr_input_transfer_size);
+    auto ddr_output_transfer_size = get_layers_transfer_size(m_ddr_output_layers);
+    CHECK_EXPECTED(ddr_output_transfer_size);
+    auto inter_context_input_transfer_size = get_layers_transfer_size(m_inter_context_input_layers);
+    CHECK_EXPECTED(inter_context_input_transfer_size);
+    auto inter_context_output_transfer_size = get_layers_transfer_size(m_inter_context_output_layers);
+    CHECK_EXPECTED(inter_context_output_transfer_size);
+
+    total_transfer_size +=
+        boundary_input_transfer_size.release() + boundary_output_transfer_size.release() +
+        ddr_input_transfer_size.release() + ddr_output_transfer_size.release() +
+        inter_context_input_transfer_size.release() + inter_context_output_transfer_size.release();
+
+    return total_transfer_size;
+}
+
+// Aggregates the parsed metadata of a single core-op: preliminary and dynamic
+// contexts, config channel info, and the output/network name orderings.
+// NOTE(review): 'supported_features' is taken by non-const reference and
+// 'sorted_network_names' is copied (not moved) - confirm both are intended.
+CoreOpMetadata::CoreOpMetadata(const std::string &core_op_name,
+    ContextMetadata &&preliminary_context,
+    std::vector<ContextMetadata> &&dynamic_contexts,
+    std::vector<ConfigChannelInfo> &&config_channels_info,
+    std::vector<std::string> &&sorted_output_names,
+    SupportedFeatures &supported_features,
+    const std::vector<std::string> &sorted_network_names)
+    : m_preliminary_context(std::move(preliminary_context)),
+    m_dynamic_contexts(std::move(dynamic_contexts)),
+    m_config_channels_info(std::move(config_channels_info)),
+    m_core_op_name(core_op_name), m_sorted_output_names(std::move(sorted_output_names)),
+    m_supported_features(supported_features), m_sorted_network_names(sorted_network_names) {}
+
+// Find the layer whose name matches 'stream_name'; returns HAILO_NOT_FOUND
+// (with an error log) when no layer matches.
+// Fix: iterate by const reference - the original 'for (auto layer_info : ...)'
+// copied every LayerInfo while scanning; only the matching one needs a copy.
+Expected<LayerInfo> CoreOpMetadata::get_layer_info_by_stream_name(const std::string &stream_name) const
+{
+    for (const auto &layer_info : get_all_layer_infos()) {
+        if (layer_info.name == stream_name) {
+            return LayerInfo(layer_info);
+        }
+    }
+    LOGGER__ERROR("Failed to find layer with name {}", stream_name);
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Collects the boundary (host-facing) input layers from all dynamic contexts.
+std::vector<LayerInfo> CoreOpMetadata::get_input_layer_infos() const
+{
+    std::vector<LayerInfo> layers;
+    // Boundary edge layers exist only in the dynamic contexts.
+    for (const auto &context : m_dynamic_contexts) {
+        const auto &boundary_inputs = context.get_boundary_input_layers();
+        layers.insert(layers.end(), boundary_inputs.begin(), boundary_inputs.end());
+    }
+    return layers;
+}
+
+// Collects the boundary (host-facing) output layers from all dynamic contexts.
+std::vector<LayerInfo> CoreOpMetadata::get_output_layer_infos() const
+{
+    std::vector<LayerInfo> layers;
+    // Boundary edge layers exist only in the dynamic contexts.
+    for (const auto &context : m_dynamic_contexts) {
+        const auto &boundary_outputs = context.get_boundary_output_layers();
+        layers.insert(layers.end(), boundary_outputs.begin(), boundary_outputs.end());
+    }
+    return layers;
+}
+
+// All boundary layers: inputs first, then outputs.
+std::vector<LayerInfo> CoreOpMetadata::get_all_layer_infos() const
+{
+    auto all_layers = get_input_layer_infos();
+    const auto output_layers = get_output_layer_infos();
+
+    // Append the outputs onto the inputs vector instead of building a third one.
+    all_layers.reserve(all_layers.size() + output_layers.size());
+    all_layers.insert(all_layers.end(), output_layers.begin(), output_layers.end());
+
+    return all_layers;
+}
+
+// Returns the boundary input layers belonging to 'network_name'.
+// An empty name or the default network name matches every network.
+// Returns HAILO_NOT_FOUND if nothing matches.
+Expected<std::vector<LayerInfo>> CoreOpMetadata::get_input_layer_infos(const std::string &network_name) const
+{
+    std::vector<LayerInfo> res;
+    // Hoisted out of the loops - the original re-built the default network name
+    // (a fresh std::string) once per layer per context.
+    const bool match_all = network_name.empty() || (network_name == default_network_name());
+    // Boundary edge layers exist only in the dynamic contexts.
+    for (const auto &context : m_dynamic_contexts) {
+        for (const auto &layer_info : context.get_boundary_input_layers()) {
+            if (match_all || (layer_info.network_name == network_name)) {
+                res.emplace_back(layer_info);
+            }
+        }
+    }
+    CHECK_AS_EXPECTED(res.size() > 0, HAILO_NOT_FOUND, "Network name {} is not found in networks metadata", network_name);
+    return res;
+}
+
+// Returns the boundary output layers belonging to 'network_name'.
+// An empty name or the default network name matches every network.
+// Returns HAILO_NOT_FOUND if nothing matches.
+Expected<std::vector<LayerInfo>> CoreOpMetadata::get_output_layer_infos(const std::string &network_name) const
+{
+    std::vector<LayerInfo> res;
+    // Hoisted out of the loops - the original re-built the default network name
+    // (a fresh std::string) once per layer per context.
+    const bool match_all = network_name.empty() || (network_name == default_network_name());
+    // Boundary edge layers exist only in the dynamic contexts.
+    for (const auto &context : m_dynamic_contexts) {
+        // const& - the original's non-const 'auto &' on a const vector was ill-formed in spirit
+        // and invited accidental mutation.
+        for (const auto &layer_info : context.get_boundary_output_layers()) {
+            if (match_all || (layer_info.network_name == network_name)) {
+                res.emplace_back(layer_info);
+            }
+        }
+    }
+    CHECK_AS_EXPECTED(res.size() > 0, HAILO_NOT_FOUND, "Network name {} is not found in networks metadata", network_name);
+    return res;
+}
+
+// Metadata of the preliminary (configuration) context.
+const ContextMetadata &CoreOpMetadata::preliminary_context() const
+{
+    return m_preliminary_context;
+}
+
+// Metadata of the dynamic contexts.
+const std::vector<ContextMetadata> &CoreOpMetadata::dynamic_contexts() const
+{
+    return m_dynamic_contexts;
+}
+
+// Per-config-channel info (engine index) for this core-op.
+const std::vector<ConfigChannelInfo> &CoreOpMetadata::config_channels_info() const
+{
+    return m_config_channels_info;
+}
+
+// All boundary layers of 'network_name': inputs first, then outputs.
+Expected<std::vector<LayerInfo>> CoreOpMetadata::get_all_layer_infos(const std::string &network_name) const
+{
+    auto inputs = get_input_layer_infos(network_name);
+    CHECK_EXPECTED(inputs);
+
+    auto outputs = get_output_layer_infos(network_name);
+    CHECK_EXPECTED(outputs);
+
+    // Reuse the inputs vector as the result and append the outputs to it.
+    auto all_layers = inputs.release();
+    const auto output_layers = outputs.release();
+    all_layers.reserve(all_layers.size() + output_layers.size());
+    all_layers.insert(all_layers.end(), output_layers.begin(), output_layers.end());
+
+    return all_layers;
+}
+
+// Input stream infos of 'network_name' in the public hailo_stream_info_t form.
+Expected<std::vector<hailo_stream_info_t>> CoreOpMetadata::get_input_stream_infos(const std::string &network_name) const
+{
+    auto layer_infos = get_input_layer_infos(network_name);
+    CHECK_EXPECTED(layer_infos);
+    return convert_layer_infos_to_stream_infos(layer_infos.release());
+}
+
+// Output stream infos of 'network_name' in the public hailo_stream_info_t form.
+Expected<std::vector<hailo_stream_info_t>> CoreOpMetadata::get_output_stream_infos(const std::string &network_name) const
+{
+    auto layer_infos = get_output_layer_infos(network_name);
+    CHECK_EXPECTED(layer_infos);
+    return convert_layer_infos_to_stream_infos(layer_infos.release());
+}
+
+// All stream infos of 'network_name': inputs first, then outputs.
+Expected<std::vector<hailo_stream_info_t>> CoreOpMetadata::get_all_stream_infos(const std::string &network_name) const
+{
+    auto inputs = get_input_stream_infos(network_name);
+    CHECK_EXPECTED(inputs);
+
+    auto outputs = get_output_stream_infos(network_name);
+    CHECK_EXPECTED(outputs);
+
+    // Reuse the inputs vector as the result and append the outputs to it.
+    auto all_infos = inputs.release();
+    const auto output_infos = outputs.release();
+    all_infos.reserve(all_infos.size() + output_infos.size());
+    all_infos.insert(all_infos.end(), output_infos.begin(), output_infos.end());
+
+    return all_infos;
+}
+
+// Input vstream infos of 'network_name' in the public hailo_vstream_info_t form.
+Expected<std::vector<hailo_vstream_info_t>> CoreOpMetadata::get_input_vstream_infos(const std::string &network_name) const
+{
+    auto layer_infos = get_input_layer_infos(network_name);
+    CHECK_EXPECTED(layer_infos);
+    return convert_layer_infos_to_vstream_infos(layer_infos.release());
+}
+
+// Output vstream infos of 'network_name', ordered by m_sorted_output_names.
+// In the net-flow case the pre-collected m_output_vstreams_infos (filled lazily
+// via add_output_vstream_info) are returned as-is - note that 'network_name' is
+// ignored on that path.
+Expected<std::vector<hailo_vstream_info_t>> CoreOpMetadata::get_output_vstream_infos(const std::string &network_name) const
+{
+    std::vector<hailo_vstream_info_t> res;
+    if (m_supported_features.hailo_net_flow) {
+        res = m_output_vstreams_infos;
+        return res;
+    }
+    auto expected_output_layer_infos = get_output_layer_infos(network_name);
+    CHECK_EXPECTED(expected_output_layer_infos);
+    auto output_layer_infos = expected_output_layer_infos.release();
+
+    res = convert_layer_infos_to_vstream_infos(output_layer_infos);
+
+    // The comparator cannot report failure directly, so lookup errors are
+    // accumulated into 'status' and checked after the sort completes.
+    hailo_status status = HAILO_SUCCESS;
+    std::sort(res.begin(), res.end(),
+        [this, &status](const auto &info1, const auto &info2)
+        {
+            const auto index1 = std::find(m_sorted_output_names.begin(), m_sorted_output_names.end(), std::string(info1.name));
+            const auto index2 = std::find(m_sorted_output_names.begin(), m_sorted_output_names.end(), std::string(info2.name));
+
+            if (m_sorted_output_names.end() == index1) {
+                LOGGER__ERROR("Stream {} not found in sorted output names", info1.name);
+                status = HAILO_INTERNAL_FAILURE;
+                return false;
+            }
+
+            if (m_sorted_output_names.end() == index2) {
+                LOGGER__ERROR("Stream {} not found in sorted output names", info2.name);
+                status = HAILO_INTERNAL_FAILURE;
+                return false;
+            }
+
+            // Order by position in the HEF's sorted output-name list.
+            return index1 < index2;
+        });
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return res;
+}
+
+// All vstream infos of 'network_name': inputs first, then outputs.
+Expected<std::vector<hailo_vstream_info_t>> CoreOpMetadata::get_all_vstream_infos(const std::string &network_name) const
+{
+    auto inputs = get_input_vstream_infos(network_name);
+    CHECK_EXPECTED(inputs);
+
+    auto outputs = get_output_vstream_infos(network_name);
+    CHECK_EXPECTED(outputs);
+
+    // Reuse the inputs vector as the result and append the outputs to it.
+    auto all_infos = inputs.release();
+    const auto output_infos = outputs.release();
+    all_infos.reserve(all_infos.size() + output_infos.size());
+    all_infos.insert(all_infos.end(), output_infos.begin(), output_infos.end());
+
+    return all_infos;
+}
+
+// Maps a stream name to the vstream name(s) attached to it:
+// - defused NMS layer -> the fused NMS layer's name
+// - mux layer         -> the demuxed vstream names
+// - otherwise         -> the stream name itself
+// Returns HAILO_NOT_FOUND when 'stream_name' matches no boundary layer.
+Expected<std::vector<std::string>> CoreOpMetadata::get_vstream_names_from_stream_name(const std::string &stream_name) const
+{
+    // Fixes: removed the unused local 'results' and iterate by const reference
+    // instead of copying each LayerInfo.
+    for (const auto &layer_info : get_all_layer_infos()) {
+        if (stream_name != layer_info.name) {
+            continue;
+        }
+        if (layer_info.is_defused_nms) {
+            return std::vector<std::string>(1, layer_info.fused_nms_layer[0].name);
+        }
+        if (layer_info.is_mux) {
+            return get_demuxes_names(layer_info);
+        }
+        return std::vector<std::string>(1, layer_info.name);
+    }
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Maps a vstream name to the underlying stream name(s) that feed it.
+// Returns HAILO_NOT_FOUND when no stream matches.
+Expected<std::vector<std::string>> CoreOpMetadata::get_stream_names_from_vstream_name(const std::string &vstream_name) const
+{
+    std::vector<std::string> results;
+    // const& - the original's mutable 'auto &' copied nothing but allowed
+    // accidental mutation of the temporary's elements.
+    for (const auto &layer_info : get_all_layer_infos()) {
+        if (layer_info.is_mux) {
+            if (is_edge_under_mux(layer_info, vstream_name)) {
+                // vstream_name is a demux of the layer info
+                results.push_back(layer_info.name);
+            }
+        } else if (layer_info.is_defused_nms) {
+            if (vstream_name == layer_info.fused_nms_layer[0].name) {
+                // vstream_name is the fused-layer of the layer info
+                results.push_back(layer_info.name);
+            }
+        } else if (m_supported_features.hailo_net_flow && (layer_info.direction == HAILO_D2H_STREAM)) {
+            // NOTE(review): with net-flow, every D2H stream is collected regardless of
+            // 'vstream_name' (post-process outputs may span several streams) - confirm.
+            results.push_back(layer_info.name);
+        } else if (vstream_name == layer_info.name) {
+            // vstream_name is a regular stream
+            results.push_back(layer_info.name);
+        }
+    }
+    // Fixed grammatical error in the error message ("Did not found").
+    CHECK_AS_EXPECTED(0 < results.size(), HAILO_NOT_FOUND, "Did not find vstream {}", vstream_name);
+    return results;
+}
+
+// Converts layer infos to the public hailo_stream_info_t representation.
+std::vector<hailo_stream_info_t> CoreOpMetadata::convert_layer_infos_to_stream_infos(const std::vector<LayerInfo> &layer_infos) const
+{
+    std::vector<hailo_stream_info_t> res;
+    res.reserve(layer_infos.size()); // exactly one stream info per layer - avoid reallocations
+    for (const auto &layer_info : layer_infos) {
+        res.push_back(LayerInfoUtils::get_stream_info_from_layer_info(layer_info));
+    }
+    return res;
+}
+
+// Converts layer infos to the public hailo_vstream_info_t representation,
+// de-duplicating by vstream name (a layer may expand to several vstreams).
+std::vector<hailo_vstream_info_t> CoreOpMetadata::convert_layer_infos_to_vstream_infos(const std::vector<LayerInfo> &layer_infos) const
+{
+    std::vector<hailo_vstream_info_t> res;
+    for (auto &layer_info : layer_infos) {
+        auto vstream_infos = LayerInfoUtils::get_vstream_infos_from_layer_info(layer_info);
+        for (const auto &vstream_info : vstream_infos) {
+            // In case of fused nms layers, several LayerInfos will contain data about the same fused layer
+            if (!LayerInfoUtils::vstream_info_already_in_vector(res, vstream_info.name)) {
+                res.push_back(vstream_info);
+            }
+        }
+    }
+    return res;
+}
+
+// Builds a hailo_network_info_t (fixed-size name buffer) per network name.
+// Fails with HAILO_INTERNAL_FAILURE when a name does not fit in the buffer.
+Expected<std::vector<hailo_network_info_t>> CoreOpMetadata::get_network_infos() const
+{
+    std::vector<hailo_network_info_t> network_infos;
+    network_infos.reserve(m_sorted_network_names.size());
+    for (const auto &network_name : m_sorted_network_names) {
+        // +1 for the NUL terminator copied below.
+        const auto name_size_with_terminator = network_name.length() + 1;
+        CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_NAME_SIZE >= name_size_with_terminator, HAILO_INTERNAL_FAILURE,
+            "The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", network_name);
+
+        hailo_network_info_t network_info = {};
+        memcpy(network_info.name, network_name.c_str(), name_size_with_terminator);
+        network_infos.push_back(network_info);
+    }
+
+    return network_infos;
+}
+
+// Total context count: the dynamic contexts plus the fixed number of
+// non-dynamic contexts defined by the control protocol.
+size_t CoreOpMetadata::get_contexts_count()
+{
+    return (m_dynamic_contexts.size() + CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS);
+}
+
+Expected<size_t> CoreOpMetadata::get_total_transfer_size()
+{
+ size_t total_transfer_size = 0;
+ for (const auto &dynamic_context : m_dynamic_contexts) {
+ auto context_size = dynamic_context.get_context_transfer_size();
+ CHECK_EXPECTED(context_size);
+ total_transfer_size += context_size.release();
+ }
+ return total_transfer_size;
+}
+
+// Returns the metadata registered for 'partial_clusters_layout_bitmap'.
+// PARTIAL_CLUSTERS_LAYOUT_IGNORE returns an arbitrary registered entry.
+// Fails with HAILO_INTERNAL_FAILURE when the bitmap is unknown.
+Expected<CoreOpMetadata> CoreOpMetadataPerArch::get_metadata(uint32_t partial_clusters_layout_bitmap)
+{
+    if (PARTIAL_CLUSTERS_LAYOUT_IGNORE == partial_clusters_layout_bitmap) {
+        // Passing PARTIAL_CLUSTERS_LAYOUT_IGNORE is magic for getting one of the metadata
+        assert(0 != m_metadata_per_arch.size());
+        auto result = m_metadata_per_arch.begin()->second;
+        return result;
+    }
+    // Single lookup via find() - the original did contains() followed by
+    // operator[], searching the map twice (and operator[] can default-construct
+    // an entry on a race/logic slip).
+    const auto entry = m_metadata_per_arch.find(partial_clusters_layout_bitmap);
+    if (m_metadata_per_arch.end() != entry) {
+        auto result = entry->second;
+        return result;
+    }
+    LOGGER__ERROR("CoreOpPerArch does not contain metadata for partial_clusters_layout_bitmap {}", partial_clusters_layout_bitmap);
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+// Registers 'metadata' under the given bitmap, overwriting any previous entry.
+void CoreOpMetadataPerArch::add_metadata(const CoreOpMetadata &metadata, uint32_t partial_clusters_layout_bitmap)
+{
+    m_metadata_per_arch[partial_clusters_layout_bitmap] = metadata;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file core_op_metadata.hpp
+ * @brief Contains all relevant information about a core-op from the hef.
+ **/
+
+#ifndef _HAILO_CORE_OP_METADATA_HPP_
+#define _HAILO_CORE_OP_METADATA_HPP_
+
+#include "hef/layer_info.hpp"
+#include "hef/context_switch_actions.hpp"
+
+
+namespace hailort
+{
+
+constexpr const uint32_t PARTIAL_CLUSTERS_LAYOUT_IGNORE = static_cast<uint32_t>(-1);
+
+// Feature flags extracted from the HEF describing what this core-op supports.
+// NOTE(review): flag semantics inferred from names - confirm against the HEF
+// extension definitions.
+struct SupportedFeatures {
+    bool padded_ddr_buffers = false;
+    bool multi_network_support = false;
+    bool multi_context = false;
+    bool preliminary_run_asap = false;
+    bool hailo_net_flow = false;
+};
+
+// For each config_stream_index we store a vector of all CCW write lengths. The vector is used to build the config buffer.
+using ConfigBufferInfoMap = std::unordered_map<uint8_t, std::vector<uint32_t>>;
+
+
+// Metadata of a single context: its context-switch actions, config-buffer info
+// and the edge layers (boundary / inter-context / DDR) registered to it.
+class ContextMetadata final {
+public:
+    ContextMetadata() = default; // TODO HRT-8478: remove
+    ContextMetadata(std::vector<ContextSwitchConfigActionPtr> &&actions,
+        ConfigBufferInfoMap&& config_buffers_info);
+
+    // All context-switch actions of this context.
+    const std::vector<ContextSwitchConfigActionPtr> &get_actions() const;
+    // The subset of actions whose type is contained in 'action_types'.
+    std::vector<ContextSwitchConfigActionPtr> get_actions_of_type(
+        const std::set<ContextSwitchConfigAction::Type> &action_types) const;
+
+    const ConfigBufferInfoMap &config_buffers_info() const;
+
+    // Registration of edge layers; each call appends to the matching
+    // input/output vector below.
+    void add_boundary_layer(const LayerInfo &layer_info);
+    void add_inter_context_layer(const LayerInfo &layer_info);
+    void add_ddr_layer(const LayerInfo &layer_info);
+
+    const std::vector<LayerInfo> &get_boundary_input_layers() const;
+    const std::vector<LayerInfo> &get_boundary_output_layers() const;
+    const std::vector<LayerInfo> &get_inter_context_input_layers() const;
+    const std::vector<LayerInfo> &get_inter_context_output_layers() const;
+    const std::vector<LayerInfo> &get_ddr_input_layers() const;
+    const std::vector<LayerInfo> &get_ddr_output_layers() const;
+
+    // Transfer-size accounting used for total HEF transfer estimation.
+    Expected<size_t> get_layers_transfer_size(const std::vector<LayerInfo> &layer_infos) const;
+    Expected<size_t> get_context_transfer_size() const;
+private:
+    std::vector<ContextSwitchConfigActionPtr> m_actions;
+    ConfigBufferInfoMap m_config_buffers_info;
+
+    std::vector<LayerInfo> m_boundary_input_layers;
+    std::vector<LayerInfo> m_boundary_output_layers;
+    std::vector<LayerInfo> m_inter_context_input_layers;
+    std::vector<LayerInfo> m_inter_context_output_layers;
+    std::vector<LayerInfo> m_ddr_input_layers;
+    std::vector<LayerInfo> m_ddr_output_layers;
+};
+
+// Info of a single configuration channel; currently only the engine it runs on.
+struct ConfigChannelInfo {
+    uint8_t engine_index;
+};
+
+// Contains all relevant information about a single core-op parsed from the HEF:
+// its contexts, config channels, sorted output/network names and feature flags.
+class CoreOpMetadata final {
+public:
+    CoreOpMetadata() = default; // TODO HRT-8478: remove
+    CoreOpMetadata(const std::string &core_op_name,
+        ContextMetadata &&preliminary_context,
+        std::vector<ContextMetadata> &&dynamic_contexts,
+        std::vector<ConfigChannelInfo> &&config_channels_info,
+        std::vector<std::string> &&sorted_output_names,
+        SupportedFeatures &supported_features,
+        const std::vector<std::string> &sorted_network_names);
+
+    // Boundary layer queries over all dynamic contexts.
+    std::vector<LayerInfo> get_input_layer_infos() const;
+    std::vector<LayerInfo> get_output_layer_infos() const;
+    std::vector<LayerInfo> get_all_layer_infos() const;
+
+    // Same queries filtered by network name; fail with HAILO_NOT_FOUND when
+    // nothing matches.
+    Expected<std::vector<LayerInfo>> get_input_layer_infos(const std::string &network_name) const;
+    Expected<std::vector<LayerInfo>> get_output_layer_infos(const std::string &network_name) const;
+    Expected<std::vector<LayerInfo>> get_all_layer_infos(const std::string &network_name) const;
+    Expected<LayerInfo> get_layer_info_by_stream_name(const std::string &stream_name) const;
+
+    const ContextMetadata &preliminary_context() const;
+    const std::vector<ContextMetadata> &dynamic_contexts() const;
+
+    const std::vector<ConfigChannelInfo> &config_channels_info() const;
+
+    Expected<std::vector<hailo_stream_info_t>> get_input_stream_infos(const std::string &network_name = "") const;
+    Expected<std::vector<hailo_stream_info_t>> get_output_stream_infos(const std::string &network_name = "") const;
+    Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name = "") const;
+
+    // TODO: HRT-9546 - Remove, should only be in CNG
+    Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name = "") const;
+    Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name = "") const;
+    Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name = "") const;
+
+    // TODO: HRT-9546 - Remove, should only be in CNG - need to decide if relevant only for one CoreOp case.
+    Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name) const;
+    Expected<std::vector<std::string>> get_stream_names_from_vstream_name(const std::string &vstream_name) const;
+
+    Expected<std::vector<hailo_network_info_t>> get_network_infos() const;
+
+    size_t get_contexts_count();
+
+    const std::string &core_op_name() const
+    {
+        return m_core_op_name;
+    }
+
+    // Return type changed from 'const std::string' to 'std::string': a const
+    // value return prevents callers from moving out of the temporary
+    // (clang-tidy readability-const-return-type) and is backward compatible.
+    std::string default_network_name() const
+    {
+        return HailoRTDefaults::get_network_name(m_core_op_name);
+    }
+
+    // Same fix as above - returns a movable copy of the sorted output names.
+    std::vector<std::string> get_sorted_output_names() const
+    {
+        return m_sorted_output_names;
+    }
+
+    // duplicated for each CoreOp
+    const SupportedFeatures &supported_features() const
+    {
+        return m_supported_features;
+    }
+
+    const std::vector<std::string> &get_network_names() const
+    {
+        return m_sorted_network_names;
+    }
+
+    // TODO: HRT-9546 - Move to CNG
+    void add_output_vstream_info(const hailo_vstream_info_t &output_vstream_info) {
+        m_output_vstreams_infos.push_back(output_vstream_info);
+    }
+
+    Expected<size_t> get_total_transfer_size();
+
+private:
+    std::vector<hailo_stream_info_t> convert_layer_infos_to_stream_infos(const std::vector<LayerInfo> &layer_infos) const;
+    std::vector<hailo_vstream_info_t> convert_layer_infos_to_vstream_infos(const std::vector<LayerInfo> &layer_infos) const;
+
+    ContextMetadata m_preliminary_context;
+    std::vector<ContextMetadata> m_dynamic_contexts;
+    std::vector<ConfigChannelInfo> m_config_channels_info;
+    std::string m_core_op_name;
+    std::vector<std::string> m_sorted_output_names;
+    SupportedFeatures m_supported_features;
+    std::vector<std::string> m_sorted_network_names;
+
+    // TODO: remove this from here! NetworkGroupMetadata should be CoreOpMetadata and contain no net_flow information! (HRT-9546)
+    // To add insult to injury, this is being constructed lazily by add_output_vstream_info
+    std::vector<hailo_vstream_info_t> m_output_vstreams_infos; // Valid only in case of post process
+};
+
+
+// Holds one CoreOpMetadata per partial-clusters-layout bitmap (per-arch variant
+// of the same core-op).
+class CoreOpMetadataPerArch final
+{
+public:
+    CoreOpMetadataPerArch() = default;
+
+    // Fetch by bitmap; PARTIAL_CLUSTERS_LAYOUT_IGNORE returns an arbitrary entry.
+    Expected<CoreOpMetadata> get_metadata(uint32_t partial_clusters_layout_bitmap);
+    void add_metadata(const CoreOpMetadata &metadata, uint32_t partial_clusters_layout_bitmap);
+
+private:
+    std::map<uint32_t, CoreOpMetadata> m_metadata_per_arch;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CORE_OP_METADATA_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hef.cpp
+ * @brief Parses HEF files and implements the public Hef API: queries over
+ *        network, stream and vstream metadata.
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/hef.hpp"
+#include "hailo/stream.hpp"
+#include "hailo/device.hpp"
+#include "hailo/hailort_common.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/string_utils.hpp"
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+#include "common/file_utils.hpp"
+
+#include "net_flow/ops/nms_post_process.hpp"
+#include "net_flow/ops/yolo_post_process.hpp"
+#include "net_flow/ops/ssd_post_process.hpp"
+#include "hef/hef_internal.hpp"
+#include "vdma/pcie/pcie_device.hpp"
+#include "vdma/vdma_config_manager.hpp"
+#include "eth/hcp_config_core_op.hpp"
+#include "hef/layer_info.hpp"
+#include "device_common/control.hpp"
+
+#include "byte_order.h"
+#include "context_switch_defs.h"
+
+#include <fstream>
+#include <memory>
+#include <limits>
+#include <stdint.h>
+#include <stdbool.h>
+#include <set>
+#include <algorithm>
+#include <cstring>
+#include <numeric>
+
+
+namespace hailort
+{
+
+#define HEF__MD5_BUFFER_SIZE (1024)
+#define DEFAULT_BATCH_SIZE (1)
+#define SKIP_SPACE_COMMA_CHARACTERS (2)
+
+static const uint8_t ENABLE_LCU_CONTROL_WORD[4] = {1, 0, 0, 0};
+
+#define TAB (" ")
+
+// Returns 'count' indentation units (TAB, 4 spaces each) concatenated.
+static std::string add_tabs(uint8_t count)
+{
+    std::string indentation;
+    for (uint8_t i = 0; i < count; i++) {
+        indentation.append(TAB);
+    }
+    return indentation;
+}
+
+// Human-readable "<type>, <order>(<dims>)" string for a stream info; the dims
+// part depends on the format order (NMS / NC / NHW / full 3D shape).
+static std::string get_shape_str(const hailo_stream_info_t &stream_info)
+{
+    // The "<type>, <order>" prefix is common to every case - build it once.
+    const auto prefix = HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " +
+        HailoRTCommon::get_format_order_str(stream_info.format.order);
+    switch (stream_info.format.order)
+    {
+    case HAILO_FORMAT_ORDER_HAILO_NMS:
+        return prefix + "(number of classes: " + std::to_string(stream_info.nms_info.number_of_classes) +
+            ", max_bboxes_per_class: "+ std::to_string(stream_info.nms_info.max_bboxes_per_class) + ")";
+    case HAILO_FORMAT_ORDER_NC:
+        return prefix + "(" + std::to_string(stream_info.hw_shape.features) + ")";
+    case HAILO_FORMAT_ORDER_NHW:
+        return prefix + "(" + std::to_string(stream_info.hw_shape.height) + "x" + std::to_string(stream_info.hw_shape.width) + ")";
+    default:
+        return prefix + "(" + std::to_string(stream_info.hw_shape.height) + "x" + std::to_string(stream_info.hw_shape.width) +
+            "x" + std::to_string(stream_info.hw_shape.features) + ")";
+    }
+}
+
+// Human-readable "<type>, <order>(<dims>)" string for a vstream info; the dims
+// part depends on the format order (NMS / NC / NHW / full 3D shape).
+static std::string get_shape_str(const hailo_vstream_info_t &vstream_info)
+{
+    // The "<type>, <order>" prefix is common to every case - build it once.
+    const auto prefix = HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " +
+        HailoRTCommon::get_format_order_str(vstream_info.format.order);
+    switch (vstream_info.format.order)
+    {
+    case HAILO_FORMAT_ORDER_HAILO_NMS:
+        return prefix + "(number of classes: " + std::to_string(vstream_info.nms_shape.number_of_classes) +
+            ", max_bboxes_per_class: " + std::to_string(vstream_info.nms_shape.max_bboxes_per_class) + ")";
+    case HAILO_FORMAT_ORDER_NC:
+        return prefix + "(" + std::to_string(vstream_info.shape.features) + ")";
+    case HAILO_FORMAT_ORDER_NHW:
+        return prefix + "(" +std::to_string(vstream_info.shape.height) + "x" + std::to_string(vstream_info.shape.width) + ")";
+    default:
+        return prefix + "(" + std::to_string(vstream_info.shape.height) + "x" + std::to_string(vstream_info.shape.width) + "x" +
+            std::to_string(vstream_info.shape.features) + ")";
+    }
+}
+
+#pragma pack(push, 1)
+// Header of a single CCW (config control word) record in the HEF config stream.
+// Packed to 1-byte alignment so it can be read directly from the raw buffer.
+typedef struct {
+    uint32_t words_count;
+    uint32_t address;
+} CcwHeader;
+#pragma pack(pop)
+
+// Equality over the per-network batch sizes plus the top-level batch_size,
+// power_mode and latency fields.
+bool ConfigureNetworkParams::operator==(const ConfigureNetworkParams &other) const
+{
+    // Compare map sizes first - the original only iterated this object's map,
+    // so a strict subset compared equal from one side (asymmetric operator==).
+    if (network_params_by_name.size() != other.network_params_by_name.size()) {
+        return false;
+    }
+    for (const auto &name_param_pair : network_params_by_name) {
+        // Single lookup via find() instead of find() followed by at().
+        const auto other_params = other.network_params_by_name.find(name_param_pair.first);
+        if ((other.network_params_by_name.end() == other_params) ||
+            (name_param_pair.second.batch_size != other_params->second.batch_size)) {
+            return false;
+        }
+    }
+    return (batch_size == other.batch_size) && (power_mode == other.power_mode) && (latency == other.latency);
+}
+
+// Defined as the negation of operator==.
+bool ConfigureNetworkParams::operator!=(const ConfigureNetworkParams &other) const
+{
+    return !(*this == other);
+}
+
+
+// Note: Can't add the definition in the header. This will lead to the following error:
+// /usr/include/c++/7/bits/unique_ptr.h: In instantiation of 'void std::default_delete<_Tp>::operator()(_Tp*) const [with _Tp = Hef::Impl]':
+// /usr/include/c++/7/bits/unique_ptr.h:263:17: required from 'std::unique_ptr<_Tp, _Dp>::~unique_ptr() [with _Tp = Hef::Impl; _Dp = std::default_delete<Hef::Impl>]'
+// /local/users/projects/platform-sw/hailort/libhailort/src/../include/hailo/hef.hpp:61:7: required from 'Expected<T>::~Expected() [with T = Hef]'
+// /local/users/projects/platform-sw/hailort/hailortcli/run_command.cpp:705:51: required from here
+// /usr/include/c++/7/bits/unique_ptr.h:76:22: error: invalid application of 'sizeof' to incomplete type 'Hef::Impl'
+// static_assert(sizeof(_Tp)>0,
+// Defaulted out-of-line so Hef::Impl is complete here; Hef is move-only (copy is
+// implicitly deleted by the unique_ptr<Impl> member).
+Hef::~Hef() = default;
+Hef::Hef(Hef &&) = default;
+Hef &Hef::operator=(Hef &&) = default;
+
+// Creates a Hef from a file on disk; fails if parsing/validation fails.
+Expected<Hef> Hef::create(const std::string &hef_path)
+{
+    auto impl = Hef::Impl::create(hef_path);
+    CHECK_EXPECTED(impl);
+
+    // TODO: can we do this without the copy ctor here (i.e. make the impl as a unique_ptr to begin with)
+    return Hef(make_unique_nothrow<Impl>(impl.release()));
+}
+
+// Creates a Hef from an in-memory buffer; fails if parsing/validation fails.
+Expected<Hef> Hef::create(const MemoryView &hef_buffer)
+{
+    auto impl = Hef::Impl::create(hef_buffer);
+    CHECK_EXPECTED(impl);
+
+    // TODO: can we do this without the copy ctor here (i.e. make the impl as a unique_ptr to begin with)
+    return Hef(make_unique_nothrow<Impl>(impl.release()));
+}
+
+// Pimpl constructor - takes ownership of the fully-parsed implementation.
+Hef::Hef(std::unique_ptr<Impl> pimpl) :
+    pimpl(std::move(pimpl))
+{}
+
+// The following getters resolve 'name' into a (network group, network) pair via
+// get_network_group_and_network_name() (which also handles "") and forward to
+// the implementation.
+Expected<std::vector<hailo_stream_info_t>> Hef::get_input_stream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_input_stream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+Expected<std::vector<hailo_stream_info_t>> Hef::get_output_stream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_output_stream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+Expected<std::vector<hailo_stream_info_t>> Hef::get_all_stream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_all_stream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+// Network infos of the given network group ("" selects the default group).
+Expected<std::vector<hailo_network_info_t>> Hef::get_network_infos(const std::string &net_group_name)
+{
+    auto names_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(names_pair);
+    return pimpl->get_network_infos(names_pair->first);
+}
+
+// Stream info lookup by stream name and direction within a network group.
+Expected<hailo_stream_info_t> Hef::get_stream_info_by_name(const std::string &stream_name,
+    hailo_stream_direction_t stream_direction, const std::string &net_group_name)
+{
+    // Addressing the situation where net_group_name == ""
+    auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(net_group_name_pair);
+    auto net_group_name_str = net_group_name_pair->first;
+
+    return pimpl->get_stream_info_by_name(stream_name, stream_direction, net_group_name_str);
+}
+
+// Vstream-info getters; 'name' is resolved to a (network group, network) pair
+// before forwarding to the implementation.
+Expected<std::vector<hailo_vstream_info_t>> Hef::get_input_vstream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_input_vstream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> Hef::get_output_vstream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_output_vstream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> Hef::get_all_vstream_infos(const std::string &name)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->get_all_vstream_infos(network_pair.value().first, network_pair.value().second);
+}
+
+// Output names in the HEF's sorted order for the given network group.
+Expected<std::vector<std::string>> Hef::get_sorted_output_names(const std::string &net_group_name)
+{
+    // Addressing the situation where net_group_name == ""
+    auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(net_group_name_pair);
+    auto net_group_name_str = net_group_name_pair->first;
+
+    return pimpl->get_sorted_output_names(net_group_name_str);
+}
+
+// Number of physical input streams in the network group.
+Expected<size_t> Hef::get_number_of_input_streams(const std::string &net_group_name)
+{
+    // Addressing the situation where net_group_name == ""
+    auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(net_group_name_pair);
+    auto net_group_name_str = net_group_name_pair->first;
+
+    return pimpl->get_number_of_input_streams(net_group_name_str);
+}
+
+// Number of physical output streams in the network group.
+Expected<size_t> Hef::get_number_of_output_streams(const std::string &net_group_name)
+{
+    // Addressing the situation where net_group_name == ""
+    auto net_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(net_group_name_pair);
+    auto net_group_name_str = net_group_name_pair->first;
+
+    return pimpl->get_number_of_output_streams(net_group_name_str);
+}
+
+// Bottleneck FPS as recorded in the HEF for the given network group.
+Expected<float64_t> Hef::get_bottleneck_fps(const std::string &net_group_name)
+{
+    return pimpl->get_bottleneck_fps(net_group_name);
+}
+
+
+// Target device architecture the HEF was compiled for, converted to the public
+// hailo_device_architecture_t enum.
+Expected<hailo_device_architecture_t> Hef::get_hef_device_arch()
+{
+    return DeviceBase::hef_arch_to_device_arch(pimpl->get_device_arch());
+}
+
+// Readable name for a device architecture value.
+Expected<std::string> Hef::device_arch_to_string(const hailo_device_architecture_t arch)
+{
+    return HailoRTCommon::get_device_arch_str(arch);
+}
+
+// Name translation: original (pre-compilation) layer name -> vstream name.
+Expected<std::string> Hef::get_vstream_name_from_original_name(const std::string &original_name,
+    const std::string &net_group_name)
+{
+    return pimpl->get_vstream_name_from_original_name(original_name, net_group_name);
+}
+
+// Name translation: vstream name -> original (pre-compilation) layer names.
+Expected<std::vector<std::string>> Hef::get_original_names_from_vstream_name(const std::string &stream_name,
+    const std::string &net_group_name)
+{
+    return pimpl->get_original_names_from_vstream_name(stream_name, net_group_name);
+}
+
+// Name translation: vstream name -> underlying stream names (group name "" is
+// resolved to the default network group first).
+Expected<std::vector<std::string>> Hef::get_stream_names_from_vstream_name(const std::string &vstream_name,
+    const std::string &net_group_name)
+{
+    auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(network_group_name_pair);
+    auto net_group_name_str = network_group_name_pair->first;
+
+    return pimpl->get_stream_names_from_vstream_name(vstream_name, net_group_name_str);
+}
+
+// Name translation: stream name -> vstream names attached to it.
+Expected<std::vector<std::string>> Hef::get_vstream_names_from_stream_name(const std::string &stream_name,
+    const std::string &net_group_name)
+{
+    auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(network_group_name_pair);
+    auto net_group_name_str = network_group_name_pair->first;
+
+    return pimpl->get_vstream_names_from_stream_name(stream_name, net_group_name_str);
+}
+
+// Parses and validates a HEF from a file path; the constructor reports its
+// result through the out 'status'.
+Expected<Hef::Impl> Hef::Impl::create(const std::string &hef_path)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    Impl hef(hef_path, status);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed creating HEF");
+        return make_unexpected(status);
+    }
+
+    return hef;
+}
+
+// Parses and validates a HEF from an in-memory buffer.
+Expected<Hef::Impl> Hef::Impl::create(const MemoryView &hef_buffer)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    Impl hef(hef_buffer, status);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed creating HEF");
+        return make_unexpected(status);
+    }
+
+    return hef;
+}
+
+// Computes the MD5 of the stream from its current position to EOF, then
+// restores the stream to the original position (clearing eof/fail bits first).
+static hailo_status calc_istream_md5(std::ifstream &s, MD5_SUM_t &calculated_md5)
+{
+    char md5_buffer[HEF__MD5_BUFFER_SIZE] = {};
+    MD5_CTX md5 = {};
+
+    auto beg_pos = s.tellg();
+    CHECK(-1 != beg_pos, HAILO_FILE_OPERATION_FAILURE, "ifstream::tellg() failed");
+
+    MD5_Init(&md5);
+    while (!s.eof()) {
+        s.read(md5_buffer, HEF__MD5_BUFFER_SIZE);
+        CHECK(!s.bad(), HAILO_FILE_OPERATION_FAILURE, "ifstream::read() failed");
+        // gcount() handles the final short read at EOF - only the bytes
+        // actually read are hashed.
+        MD5_Update(&md5, &md5_buffer, static_cast<size_t>(s.gcount()));
+    }
+    MD5_Final(calculated_md5, &md5);
+
+    // clear() resets the eofbit set by the loop so seekg() can succeed.
+    s.clear();
+    s.seekg(beg_pos, s.beg);
+    CHECK(s.good(), HAILO_FILE_OPERATION_FAILURE, "ifstream::seekg() failed");
+
+    return HAILO_SUCCESS;
+}
+
+// Validates a parsed HEF header: magic, version, protobuf payload length and
+// the MD5 of the payload against the header's expected digest.
+// NOTE(review): header fields appear to be stored in network byte order
+// (BYTE_ORDER__htonl on each) - the error message prints the raw, unconverted
+// magic; confirm intended.
+hailo_status Hef::Impl::validate_hef_header(const hef__header_t &header, MD5_SUM_t &calculated_md5, size_t proto_size)
+{
+    CHECK(HEADER_MAGIC == BYTE_ORDER__htonl(header.magic), HAILO_INVALID_HEF,
+        "HEF magic does not match. detected magic - {:x}", header.magic);
+
+    CHECK(HEADER_VERSION == BYTE_ORDER__htonl(header.version), HAILO_INVALID_HEF, "HEF version does not match");
+
+    CHECK(proto_size == BYTE_ORDER__htonl(header.hef_proto_length), HAILO_INVALID_HEF,
+        "HEF file length does not match");
+
+    CHECK(0 == memcmp(&calculated_md5, &header.expected_md5, sizeof(MD5_SUM_t)), HAILO_INVALID_HEF,
+        "HEF md5 does not match");
+
+    return HAILO_SUCCESS;
+}
+
+// Verifies that every extension recorded in the HEF is one this HailoRT build
+// knows how to handle (per m_supported_extensions_bitset, filled from
+// SUPPORTED_EXTENSIONS). Fails with HAILO_INVALID_HEF listing the offenders.
+hailo_status Hef::Impl::validate_hef_extensions()
+{
+ std::vector<std::string> unsupported_extensions;
+ for (const auto &extension : m_hef_extensions) {
+ // Unknown if the index is outside the bitset's range or simply not set.
+ if ((extension.type_index() >= m_supported_extensions_bitset.size()) || !m_supported_extensions_bitset.test(extension.type_index())) {
+ unsupported_extensions.emplace_back(extension.name());
+ }
+ }
+
+ // NOTE(review): the accumulate below indexes unsupported_extensions[0]; this is
+ // only safe if CHECK evaluates its message arguments lazily (i.e. solely on
+ // failure, when the vector is non-empty) - confirm the macro keeps that behavior.
+ CHECK(unsupported_extensions.empty(), HAILO_INVALID_HEF, "Failed opening non-compatible HEF with the following unsupported extensions: {}",
+ std::accumulate(std::next(unsupported_extensions.begin()), unsupported_extensions.end(), unsupported_extensions[0],
+ [] (std::string a, std::string b) { return std::move(a) + ", " + b; }));
+
+ return HAILO_SUCCESS;
+}
+
+// Caches the digest computed during parsing in the member m_md5.
+void Hef::Impl::init_md5(MD5_SUM_t &calculated_md5)
+{
+ memcpy(m_md5, calculated_md5, sizeof(m_md5));
+}
+
+// Parses a HEF from a file on disk: validates the binary header (magic, version,
+// payload length, MD5), parses the protobuf payload and fills the internal
+// core-op / network metadata structures.
+hailo_status Hef::Impl::parse_hef_file(const std::string &hef_path)
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ // Multi-process builds keep a full in-memory copy of the HEF (see get_hef_memview).
+ auto hef_buffer = read_binary_file(hef_path);
+ CHECK_EXPECTED_AS_STATUS(hef_buffer);
+ m_hef_buffer = hef_buffer.release();
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+ auto hef_file = std::ifstream(hef_path, std::ios::in | std::ios::binary);
+ CHECK(hef_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed to open HEF file \"{}\". errno: {}", hef_path, errno);
+
+ hef__header_t header = {};
+ hef_file.read((char*)&header, sizeof(header));
+ CHECK(hef_file.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading HEF header");
+
+ // Size of the remaining (protobuf) payload; cross-checked against the header's
+ // hef_proto_length inside validate_hef_header.
+ auto proto_size = get_istream_size(hef_file);
+ CHECK_EXPECTED_AS_STATUS(proto_size);
+
+ // calc_istream_md5 digests from the current position (after the header) and
+ // restores the stream position afterwards.
+ MD5_SUM_t calculated_md5 = {};
+ auto status = calc_istream_md5(hef_file, calculated_md5);
+ CHECK_SUCCESS(status);
+
+ status = validate_hef_header(header, calculated_md5, proto_size.value());
+ CHECK_SUCCESS(status);
+
+ init_md5(calculated_md5);
+
+ ProtoHEFHef hef_message;
+ auto rb = hef_message.ParseFromIstream(&hef_file);
+ CHECK(rb, HAILO_INVALID_HEF, "Failed parsing HEF file");
+ // Move heavy sub-messages out of the protobuf instead of copying them.
+ status = transfer_protobuf_field_ownership(hef_message);
+ CHECK_SUCCESS(status);
+
+ fill_core_ops();
+
+ status = fill_networks_metadata();
+ CHECK_SUCCESS(status);
+
+ // Must be called after fill_networks_metadata
+ status = validate_hef_extensions();
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+// Parses a HEF residing in a caller-provided memory buffer. Expected layout:
+// a hef__header_t immediately followed by the protobuf payload.
+hailo_status Hef::Impl::parse_hef_memview(const MemoryView &hef_memview)
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+ // Multi-process builds keep their own copy of the HEF bytes.
+ auto hef_buffer = Buffer::create(hef_memview.data(), hef_memview.size());
+ CHECK_EXPECTED_AS_STATUS(hef_buffer);
+ m_hef_buffer = hef_buffer.release();
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+ CHECK(hef_memview.size() >= sizeof(hef__header_t), HAILO_INVALID_HEF, "Invalid HEF header");
+ // NOTE(review): this reinterpret_cast assumes the buffer is suitably aligned
+ // for hef__header_t - confirm callers guarantee the alignment.
+ const hef__header_t &header = reinterpret_cast<const hef__header_t&>(*hef_memview.data());
+
+ auto proto_buffer = (hef_memview.data() + sizeof(header));
+ auto proto_size = (hef_memview.size() - sizeof(header));
+
+ // Single-shot MD5 over the payload (unlike the streaming file variant).
+ MD5_CTX md5 = {};
+ MD5_SUM_t calculated_md5 = {};
+ MD5_Init(&md5);
+ MD5_Update(&md5, proto_buffer, proto_size);
+ MD5_Final(calculated_md5, &md5);
+
+ auto status = validate_hef_header(header, calculated_md5, proto_size);
+ CHECK_SUCCESS(status);
+
+ init_md5(calculated_md5);
+
+ ProtoHEFHef hef_message;
+ auto rb = hef_message.ParseFromArray(proto_buffer, static_cast<int>(proto_size));
+ CHECK(rb, HAILO_INVALID_HEF, "Failed parsing HEF buffer");
+ status = transfer_protobuf_field_ownership(hef_message);
+ CHECK_SUCCESS(status);
+
+ fill_core_ops();
+
+ status = fill_networks_metadata();
+ CHECK_SUCCESS(status);
+
+ // Must be called after fill_networks_metadata
+ status = validate_hef_extensions();
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+// Builds the CoreOpMetadata variants for every network group in the HEF and
+// records the post-process ops per core-op name. On Hailo-8L each partial
+// core-op / partial network group yields its own variant keyed by its
+// partial-clusters layout bitmap; other architectures use a single variant
+// keyed by PARTIAL_CLUSTERS_LAYOUT_IGNORE.
+hailo_status Hef::Impl::fill_networks_metadata()
+{
+ fill_extensions_bitset();
+
+ // NOTE(review): 'metadata' lives outside the group loop, so variants added for
+ // one network group remain visible while the next group is processed - confirm
+ // this accumulation is intended (a per-iteration local seems more natural).
+ CoreOpMetadataPerArch metadata;
+ uint32_t partial_clusters_layout_bitmap = 0;
+
+ for (auto &network_group : m_groups) {
+ auto network_group_name = HefUtils::get_network_group_name(*network_group, m_supported_features);
+ // TODO: keep metadata per core_op (HRT-9551)
+ const auto &core_ops = m_core_ops_per_group[network_group_name];
+ assert(core_ops.size() == 1);
+ const auto &core_op = core_ops[0];
+ if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
+ if (m_supported_features.hailo_net_flow) {
+ // Net-flow HEFs: partial core-ops were pre-built by fill_core_ops().
+ for (auto &partial_core_op : core_op.partial_core_ops) {
+ partial_clusters_layout_bitmap = partial_core_op->layout.partial_clusters_layout_bitmap();
+ auto metadata_per_arch = create_metadata_per_arch(*(partial_core_op->core_op));
+ CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
+ auto &&arch_metadata = metadata_per_arch.release();
+ auto expected_net_flow_ops = create_net_flow_ops(*network_group, arch_metadata);
+ CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
+ m_post_process_ops_per_group.insert({arch_metadata.core_op_name(), expected_net_flow_ops.value()});
+ metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
+ }
+ } else {
+ // Legacy HEFs: wrap each partial network group as a core-op on the fly.
+ for (auto &partial_network_group : network_group->partial_network_groups()) {
+ partial_clusters_layout_bitmap = partial_network_group.layout().partial_clusters_layout_bitmap();
+ ProtoHEFCoreOpMock partial_core_op{
+ partial_network_group.network_group().network_group_metadata(),
+ partial_network_group.network_group().preliminary_config(),
+ partial_network_group.network_group().contexts(),
+ partial_network_group.network_group().sorted_outputs_order(),
+ partial_network_group.network_group().fused_layers_metadata(),
+ partial_network_group.network_group().networks_names(),
+ {}
+ };
+ auto metadata_per_arch = create_metadata_per_arch(partial_core_op);
+ CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
+ auto &&arch_metadata = metadata_per_arch.release();
+ // No net-flow support here - register an empty op list for the core-op.
+ std::vector<std::shared_ptr<NetFlowElement>> empty_ops;
+ m_post_process_ops_per_group.insert({arch_metadata.core_op_name(), empty_ops});
+ metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
+ }
+ }
+ } else {
+ // Full-cluster architectures: one metadata variant per group.
+ partial_clusters_layout_bitmap = PARTIAL_CLUSTERS_LAYOUT_IGNORE;
+ auto metadata_per_arch = create_metadata_per_arch(core_op);
+ CHECK_EXPECTED_AS_STATUS(metadata_per_arch);
+ auto &&arch_metadata = metadata_per_arch.release();
+ auto expected_net_flow_ops = create_net_flow_ops(*network_group, arch_metadata);
+ CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
+ m_post_process_ops_per_group.insert({arch_metadata.core_op_name(), expected_net_flow_ops.value()});
+ metadata.add_metadata(arch_metadata, partial_clusters_layout_bitmap);
+ }
+ // Each group name may appear only once across the HEF.
+ CHECK(!contains(m_core_op_per_arch, network_group_name),
+ HAILO_INVALID_OPERATION, "Network group with the name {} is already configured on the device", network_group_name);
+ m_core_op_per_arch.emplace(network_group_name, metadata);
+ }
+ return HAILO_SUCCESS;
+}
+
+// Builds the per-config-channel info (dma engine index) for a core-op. HEFs that
+// predate the cfg_channels_count field report 0, which is treated as one channel;
+// channels with no explicit config entry fall back to the default engine.
+static Expected<std::vector<ConfigChannelInfo>> parse_config_channels_info(const ProtoHEFCoreOpMock &core_op)
+{
+ const auto &metadata = core_op.network_group_metadata;
+ // Backwards compatibility for HEFs without the cfg_channels_count field
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(metadata.cfg_channels_count()),
+ HAILO_INVALID_HEF, "Invalid cfg channels count");
+ const uint8_t channels_count = (0 == metadata.cfg_channels_count()) ?
+ 1 : static_cast<uint8_t>(metadata.cfg_channels_count());
+
+ const auto &channels_config = metadata.cfg_channels_config();
+ std::vector<ConfigChannelInfo> channels_info;
+ channels_info.reserve(channels_count);
+ for (uint8_t channel_index = 0; channel_index < channels_count; channel_index++) {
+ auto matching_cfg = std::find_if(channels_config.begin(), channels_config.end(),
+ [channel_index](const auto &candidate) { return candidate.cfg_channel_index() == channel_index; });
+ if (channels_config.end() == matching_cfg) {
+ // Not found - can happen on old HEF or hailo8. In those case we want to use the default engine
+ channels_info.emplace_back(ConfigChannelInfo{vdma::DEFAULT_ENGINE_INDEX});
+ continue;
+ }
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(matching_cfg->engine_id()), HAILO_INVALID_HEF, "Invalid dma engine index");
+ channels_info.emplace_back(ConfigChannelInfo{static_cast<uint8_t>(matching_cfg->engine_id())});
+ }
+
+ return channels_info;
+}
+
+// Assembles a CoreOpMetadata for a single core-op: parses the preliminary and
+// dynamic contexts, the config channel info, the sorted output names, and the
+// (group-qualified) network names.
+Expected<CoreOpMetadata> Hef::Impl::create_metadata_per_arch(const ProtoHEFCoreOpMock &core_op)
+{
+ auto preliminary_context = HefUtils::parse_preliminary_context(core_op.preliminary_config, m_supported_features);
+ CHECK_EXPECTED(preliminary_context);
+
+ auto dynamic_contexts = HefUtils::parse_dynamic_contexts(core_op, m_supported_features);
+ CHECK_EXPECTED(dynamic_contexts);
+
+ auto config_channels_info = parse_config_channels_info(core_op);
+ CHECK_EXPECTED(config_channels_info);
+
+ auto sorted_output_names = HefUtils::get_sorted_output_names(core_op);
+ CHECK_EXPECTED(sorted_output_names);
+
+ std::vector<std::string> sorted_network_names;
+ if (m_supported_features.multi_network_support) {
+ sorted_network_names.reserve(core_op.networks_names.size());
+ for (auto &partial_network_name : core_op.networks_names) {
+ auto network_name = HefUtils::get_network_name(core_op, partial_network_name);
+ sorted_network_names.push_back(network_name);
+ }
+ } else {
+ // Without multi-network support there is exactly one network, named after the group.
+ sorted_network_names.push_back(HailoRTDefaults::get_network_name(core_op.network_group_metadata.network_group_name()));
+ }
+
+ // Currently, CoreOp name is the same as network_group_name, thats why we init it with it.
+ // TODO: HRT-9551 - Change it when supporting multi core ops.
+ CoreOpMetadata metadata_per_arch(core_op.network_group_metadata.network_group_name(),
+ preliminary_context.release(), dynamic_contexts.release(), config_channels_info.release(),
+ sorted_output_names.release(), m_supported_features, sorted_network_names);
+ return metadata_per_arch;
+}
+
+// Normalizes the protobuf network groups into ProtoHEFCoreOpMock objects stored
+// in m_core_ops_per_group. Net-flow HEFs describe core-ops explicitly (an op of
+// kind kCoreOp inside the group); older HEFs carry the same fields on the network
+// group itself. Partial core-ops / partial network groups (Hailo-8L layout
+// variants) are wrapped and attached alongside each core-op.
+void Hef::Impl::fill_core_ops()
+{
+ if (m_supported_features.hailo_net_flow) {
+ for (const auto &net_group : m_groups) {
+ // Locate the single core-op entry among this group's ops.
+ auto core_op_iter = std::find_if(net_group->ops().begin(), net_group->ops().end(),
+ [](auto &op) {
+ return op.op_case() == ProtoHEFOp::kCoreOp;
+ });
+ // Bugfix: compare against the end iterator of the container that was
+ // searched (was m_groups[0]->ops().end() - a different container whenever
+ // net_group != m_groups[0], and comparing iterators of different
+ // containers is undefined behavior).
+ assert(core_op_iter != net_group->ops().end());
+ std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
+ partial_core_ops.reserve(core_op_iter->core_op().partial_core_ops().size());
+ for (auto &partial_core_op : core_op_iter->core_op().partial_core_ops()) {
+ ProtoHEFCoreOpMock core_op{
+ partial_core_op.core_op().network_group_metadata(),
+ partial_core_op.core_op().preliminary_config(),
+ partial_core_op.core_op().contexts(),
+ partial_core_op.core_op().sorted_outputs_order(),
+ partial_core_op.core_op().fused_layers_metadata(),
+ partial_core_op.core_op().networks_names(),
+ {}
+ };
+ ProtoHEFPartialCoreOpMock partial_core_op_mock{
+ std::make_shared<ProtoHEFCoreOpMock>(core_op),
+ partial_core_op.layout()
+ };
+ partial_core_ops.push_back(std::make_shared<ProtoHEFPartialCoreOpMock>(partial_core_op_mock));
+ }
+ ProtoHEFCoreOpMock core_op{
+ core_op_iter->core_op().network_group_metadata(),
+ core_op_iter->core_op().preliminary_config(),
+ core_op_iter->core_op().contexts(),
+ core_op_iter->core_op().sorted_outputs_order(),
+ core_op_iter->core_op().fused_layers_metadata(),
+ core_op_iter->core_op().networks_names(),
+ partial_core_ops
+ };
+ auto net_group_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
+ m_core_ops_per_group[net_group_name].push_back(std::move(core_op));
+ }
+ } else {
+ // Legacy HEFs: the network group itself plays the role of the core-op.
+ for (const auto &net_group : m_groups) {
+ std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
+ partial_core_ops.reserve(net_group->partial_network_groups().size());
+ for (auto &partial_network_group : net_group->partial_network_groups()) {
+ ProtoHEFCoreOpMock core_op{
+ partial_network_group.network_group().network_group_metadata(),
+ partial_network_group.network_group().preliminary_config(),
+ partial_network_group.network_group().contexts(),
+ partial_network_group.network_group().sorted_outputs_order(),
+ partial_network_group.network_group().fused_layers_metadata(),
+ partial_network_group.network_group().networks_names(),
+ {}
+ };
+ ProtoHEFPartialCoreOpMock partial_core_op{
+ std::make_shared<ProtoHEFCoreOpMock>(core_op),
+ partial_network_group.layout()
+ };
+ partial_core_ops.push_back(std::make_shared<ProtoHEFPartialCoreOpMock>(partial_core_op));
+ }
+ ProtoHEFCoreOpMock core_op{
+ net_group->network_group_metadata(),
+ net_group->preliminary_config(),
+ net_group->contexts(),
+ net_group->sorted_outputs_order(),
+ net_group->fused_layers_metadata(),
+ net_group->networks_names(),
+ partial_core_ops
+ };
+ auto net_group_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
+ m_core_ops_per_group[net_group_name].push_back(std::move(core_op));
+ }
+ }
+}
+
+// Moves the heavy fields out of the parsed protobuf message into members (taking
+// ownership of the network group sub-messages instead of copying them), copies the
+// small header/feature messages, and derives the supported-features set.
+hailo_status Hef::Impl::transfer_protobuf_field_ownership(ProtoHEFHef &hef_message)
+{
+ m_groups.reserve(hef_message.network_groups().size());
+ while (!hef_message.network_groups().empty()) {
+ // We pass the ownership from protobuf to shared_ptr (it'll call delete when the refcount drops to 0)
+ // Note: Protobuf messages are allocated with new
+ // NOTE(review): ReleaseLast() pops from the back, so m_groups ends up in
+ // reverse protobuf order - confirm downstream code doesn't rely on ordering.
+ const auto network_group = hef_message.mutable_network_groups()->ReleaseLast();
+ CHECK(nullptr != network_group, HAILO_INTERNAL_FAILURE, "Null network group found while parsing HEF; Unexpected");
+ m_groups.emplace_back(network_group);
+ }
+
+ m_hef_extensions.reserve(hef_message.extensions().size());
+ for (const auto &extension : hef_message.extensions()) {
+ m_hef_extensions.emplace_back(extension);
+ }
+
+ m_header.CopyFrom(hef_message.header());
+ m_included_features.CopyFrom(hef_message.included_features());
+
+ m_hef_optional_extensions.reserve(hef_message.optional_extensions().size());
+ for (const auto &optional_extension : hef_message.optional_extensions()) {
+ m_hef_optional_extensions.emplace_back(optional_extension);
+ }
+
+ m_supported_features = get_supported_features(m_header, m_hef_extensions, m_included_features,
+ m_hef_optional_extensions);
+
+ return HAILO_SUCCESS;
+}
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+// Returns a non-owning view of the HEF copy this Impl keeps in multi-process builds.
+const MemoryView Hef::Impl::get_hef_memview()
+{
+ return MemoryView(m_hef_buffer);
+}
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+// Constructs an Impl by parsing a HEF file from disk. Constructors cannot return
+// Expected, so the outcome is reported through the status out-parameter
+// (HAILO_SUCCESS exactly when parsing succeeded).
+Hef::Impl::Impl(const std::string &hef_path, hailo_status &status)
+{
+ status = HAILO_UNINITIALIZED;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ status = parse_hef_file(hef_path);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed parsing HEF file");
+ }
+}
+
+// Constructs an Impl by parsing a HEF held in memory; mirrors the file-path
+// constructor, reporting the outcome through the status out-parameter.
+Hef::Impl::Impl(const MemoryView &hef_memview, hailo_status &status)
+{
+ status = HAILO_UNINITIALIZED;
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+ status = parse_hef_memview(hef_memview);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed parsing HEF buffer");
+ }
+}
+
+// Marks every extension type this HailoRT build knows how to handle, so
+// validate_hef_extensions() can flag anything else as unsupported.
+void Hef::Impl::fill_extensions_bitset()
+{
+ for (const auto &supported_extension : SUPPORTED_EXTENSIONS) {
+ m_supported_extensions_bitset.set(supported_extension);
+ }
+}
+
+// Derives the feature set carried by this HEF from its header, extensions and
+// included-features message. Each flag maps to one (optional) HEF extension type.
+SupportedFeatures Hef::Impl::get_supported_features(const ProtoHEFHeader &header,
+ const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features,
+ const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions)
+{
+ SupportedFeatures supported_features{};
+ supported_features.padded_ddr_buffers = check_hef_extension(ProtoHEFExtensionType::PADDED_DDR_BUFFERS,
+ header, hef_extensions, included_features);
+ // Multi-network batch sizing is an *optional* extension, unlike the others.
+ supported_features.multi_network_support = check_hef_optional_extension(ProtoHEFExtensionType::MULTI_NETWORK_VARIABLE_BATCH_SIZE,
+ header, hef_optional_extensions);
+ supported_features.multi_context = check_hef_extension(ProtoHEFExtensionType::IS_MULTI_CONTEXTS,
+ header, hef_extensions, included_features);
+ supported_features.preliminary_run_asap = check_hef_extension(ProtoHEFExtensionType::KO_RUN_ASAP,
+ header, hef_extensions, included_features);
+ supported_features.hailo_net_flow = check_hef_extension(ProtoHEFExtensionType::HAILO_NET_FLOW,
+ header, hef_extensions, included_features);
+
+ return supported_features;
+}
+
+// Copies the generic NMS parameters shared by all NMS op flavors from the
+// protobuf op into a runtime NmsPostProcessConfig.
+net_flow::NmsPostProcessConfig create_nms_config(const ProtoHEFOp &op_proto)
+{
+ const auto &nms_proto = op_proto.nms_op();
+ net_flow::NmsPostProcessConfig nms_config{};
+ nms_config.nms_score_th = (float32_t)nms_proto.nms_score_th();
+ nms_config.nms_iou_th = (float32_t)nms_proto.nms_iou_th();
+ nms_config.max_proposals_per_class = nms_proto.max_proposals_per_class();
+ nms_config.classes = nms_proto.classes();
+ nms_config.background_removal = nms_proto.background_removal();
+ nms_config.background_removal_index = nms_proto.background_removal_index();
+ return nms_config;
+}
+
+// Builds a YOLOv5 NMS post-process op from its protobuf description. Per bbox
+// decoder, anchors are flattened as interleaved (w, h) pairs and keyed by the
+// name of the core output stream feeding that decoder; input metadata (format,
+// quantization, shapes) is taken from the connected core output streams.
+Expected<std::shared_ptr<net_flow::Op>> create_yolov5_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
+ const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+{
+ auto nms_config = create_nms_config(op_proto);
+ net_flow::YoloPostProcessConfig yolo_config{};
+ yolo_config.image_height = (float32_t)op_proto.nms_op().yolo_nms_op().image_height();
+ yolo_config.image_width = (float32_t)op_proto.nms_op().yolo_nms_op().image_width();
+ for (auto &bbox_proto : op_proto.nms_op().yolo_nms_op().bbox_decoders()) {
+ std::vector<int> bbox_anchors;
+ CHECK_AS_EXPECTED((bbox_proto.h().size() == bbox_proto.w().size()), HAILO_INVALID_HEF,
+ "YOLOv5 height anchors count {} doesn't mach the width anchors count {}", bbox_proto.h().size(), bbox_proto.w().size());
+ for (int i = 0; i < bbox_proto.h().size(); ++i) {
+ bbox_anchors.push_back(bbox_proto.w()[i]);
+ bbox_anchors.push_back(bbox_proto.h()[i]);
+ }
+ assert(contains(pad_index_to_streams_info, static_cast<size_t>(bbox_proto.pad_index())));
+ yolo_config.anchors.insert({pad_index_to_streams_info.at(bbox_proto.pad_index()).name, bbox_anchors});
+ }
+
+ std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
+ std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+ net_flow::BufferMetaData output_metadata{};
+ output_metadata.format = output_format;
+ // NMS ops have a single output pad (asserted by create_net_flow_ops).
+ outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+ for (auto &input_pad : op_proto.input_pads()) {
+ // Resolve: op input pad -> connected core output pad -> its stream info.
+ CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
+ "NMS op is not connected to core op");
+ auto output_pad_index = input_to_output_pads.at(input_pad.index());
+ CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
+ "Pad {} of post-process {} is not connected to any core output stream",
+ input_pad.index(), op_proto.name());
+ const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+ net_flow::BufferMetaData input_metadata{};
+ input_metadata.format = op_input_stream.format;
+ input_metadata.quant_info = op_input_stream.quant_info;
+ input_metadata.shape = op_input_stream.shape;
+ input_metadata.padded_shape = op_input_stream.hw_shape;
+ inputs_metadata.insert({op_input_stream.name, input_metadata});
+ }
+ return net_flow::YOLOv5PostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, yolo_config);
+}
+
+// Builds a YOLOX NMS post-process op from its protobuf description. Same wiring
+// as create_yolov5_op (inputs resolved through the pad edges), but YOLOX carries
+// no anchors - only the image dimensions.
+Expected<std::shared_ptr<net_flow::Op>> create_yolox_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
+ const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+{
+ auto nms_config = create_nms_config(op_proto);
+ net_flow::YoloPostProcessConfig yolo_config{};
+ yolo_config.image_height = (float32_t)op_proto.nms_op().yolo_nms_op().image_height();
+ yolo_config.image_width = (float32_t)op_proto.nms_op().yolo_nms_op().image_width();
+
+ std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
+ std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+ net_flow::BufferMetaData output_metadata{};
+ output_metadata.format = output_format;
+ outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+ for (auto &input_pad : op_proto.input_pads()) {
+ // Resolve: op input pad -> connected core output pad -> its stream info.
+ CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
+ "NMS op is not connected to core op");
+ auto output_pad_index = input_to_output_pads.at(input_pad.index());
+ CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
+ "Pad {} of post-process {} is not connected to any core output stream",
+ input_pad.index(), op_proto.name());
+ const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+ net_flow::BufferMetaData input_metadata{};
+ input_metadata.format = op_input_stream.format;
+ input_metadata.quant_info = op_input_stream.quant_info;
+ input_metadata.shape = op_input_stream.shape;
+ input_metadata.padded_shape = op_input_stream.hw_shape;
+ inputs_metadata.insert({op_input_stream.name, input_metadata});
+ }
+ return net_flow::YOLOXPostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, yolo_config);
+}
+
+// Builds an SSD NMS post-process op from its protobuf description. Per bbox
+// decoder, the reg/cls layers are paired: both receive the same flattened
+// (w, h)-interleaved anchor list, and reg_to_cls_inputs records the pairing.
+// Box-offset indices default to the standard (y, x, h, w) layout when the HEF
+// leaves all four as zero. Input metadata is taken from the connected core
+// output streams, resolved through the pad edges.
+Expected<std::shared_ptr<net_flow::Op>> create_ssd_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
+ const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+{
+ auto nms_config = create_nms_config(op_proto);
+ net_flow::SSDPostProcessConfig ssd_config{};
+ ssd_config.image_height = (float32_t)op_proto.nms_op().ssd_nms_op().image_height();
+ ssd_config.image_width = (float32_t)op_proto.nms_op().ssd_nms_op().image_width();
+ ssd_config.centers_scale_factor = op_proto.nms_op().ssd_nms_op().centers_scale_factor();
+ ssd_config.bbox_dimensions_scale_factor = op_proto.nms_op().ssd_nms_op().bbox_dimensions_scale_factor();
+ ssd_config.ty_index = op_proto.nms_op().ssd_nms_op().ty();
+ ssd_config.tx_index = op_proto.nms_op().ssd_nms_op().tx();
+ ssd_config.th_index = op_proto.nms_op().ssd_nms_op().th();
+ ssd_config.tw_index = op_proto.nms_op().ssd_nms_op().tw();
+
+ // All-zero indices mean "not set" - fall back to the default offset layout.
+ if ((ssd_config.ty_index == 0) && (ssd_config.tx_index == 0) && (ssd_config.th_index == 0) && (ssd_config.tw_index == 0)) {
+ ssd_config.ty_index = net_flow::SSDPostProcessOp::DEFAULT_Y_OFFSET_IDX;
+ ssd_config.tx_index = net_flow::SSDPostProcessOp::DEFAULT_X_OFFSET_IDX;
+ ssd_config.th_index = net_flow::SSDPostProcessOp::DEFAULT_H_OFFSET_IDX;
+ ssd_config.tw_index = net_flow::SSDPostProcessOp::DEFAULT_W_OFFSET_IDX;
+ }
+
+ for (auto &bbox_proto : op_proto.nms_op().ssd_nms_op().bbox_decoders()) {
+ std::vector<float32_t> bbox_anchors;
+ // Bugfix: malformed anchor lists now fail with HAILO_INVALID_HEF (matching
+ // the YOLOv5 parser) instead of a debug-only assert that disappears in
+ // release builds.
+ CHECK_AS_EXPECTED((bbox_proto.h().size() == bbox_proto.w().size()), HAILO_INVALID_HEF,
+ "SSD height anchors count {} doesn't match the width anchors count {}", bbox_proto.h().size(), bbox_proto.w().size());
+ for (int i = 0; i < bbox_proto.h().size(); ++i) {
+ bbox_anchors.push_back(bbox_proto.w()[i]);
+ bbox_anchors.push_back(bbox_proto.h()[i]);
+ }
+ assert(contains(pad_index_to_streams_info, static_cast<size_t>(bbox_proto.reg_pad_index())));
+ auto reg_name = pad_index_to_streams_info.at(bbox_proto.reg_pad_index()).name;
+ ssd_config.anchors.insert({reg_name, bbox_anchors});
+ assert(contains(pad_index_to_streams_info, static_cast<size_t>(bbox_proto.cls_pad_index())));
+ auto cls_name = pad_index_to_streams_info.at(bbox_proto.cls_pad_index()).name;
+ // Reuse the already-resolved cls_name instead of repeating the map lookup.
+ ssd_config.anchors.insert({cls_name, bbox_anchors});
+ ssd_config.reg_to_cls_inputs.insert({reg_name, cls_name});
+ }
+
+ std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
+ std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+ net_flow::BufferMetaData output_metadata{};
+ output_metadata.format = output_format;
+ outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+ for (auto &input_pad : op_proto.input_pads()) {
+ // Resolve: op input pad -> connected core output pad -> its stream info.
+ CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
+ "NMS op is not connected to core op");
+ auto output_pad_index = input_to_output_pads.at(input_pad.index());
+ CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
+ "Pad {} of post-process {} is not connected to any core output stream",
+ input_pad.index(), op_proto.name());
+ const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+ net_flow::BufferMetaData input_metadata{};
+ input_metadata.format = op_input_stream.format;
+ input_metadata.quant_info = op_input_stream.quant_info;
+ input_metadata.shape = op_input_stream.shape;
+ input_metadata.padded_shape = op_input_stream.hw_shape;
+ inputs_metadata.insert({op_input_stream.name, input_metadata});
+ }
+ return net_flow::SSDPostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, ssd_config);
+}
+
+// Builds the post-process (net-flow) elements of a network group: for each
+// non-core op in the proto, creates the matching net_flow::Op, wires its input
+// stream names through the pad edges, and registers an output vstream info on
+// the core-op metadata. Returns an empty vector when the HEF has no net-flow
+// support.
+Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flow_ops(const ProtoHEFNetworkGroup &network_group_proto,
+ CoreOpMetadata &core_op_metadata) const
+{
+ std::vector<std::shared_ptr<NetFlowElement>> result;
+ if (!m_supported_features.hailo_net_flow) {
+ return result;
+ }
+ // Index the core output streams by their pad index so ops can be wired to them.
+ auto output_layer_infos = core_op_metadata.get_output_layer_infos();
+ std::map<size_t, LayerInfo> pad_index_to_streams_info;
+ for (auto &output_layer_info : output_layer_infos) {
+ if (output_layer_info.pad_index != INVALID_PAD_INDEX) {
+ pad_index_to_streams_info.insert({output_layer_info.pad_index, output_layer_info});
+ }
+ }
+ // Pad edges map an op's input pad (dst) to the producing output pad (src).
+ std::map<size_t, size_t> input_to_output_pads;
+ for (auto &pad_edge : network_group_proto.pad_edges()) {
+ input_to_output_pads.insert({pad_edge.dst(), pad_edge.src()});
+ }
+ for (auto &op_proto : network_group_proto.ops()) {
+ switch (op_proto.op_case()) {
+ case ProtoHEFOp::kCoreOp: {
+ // Core-ops are handled by fill_core_ops(), not here.
+ break;
+ }
+ case ProtoHEFOp::kNmsOp: {
+ // NMS post-process always emits float32 NMS-ordered output.
+ hailo_format_t output_format{};
+ output_format.type = HAILO_FORMAT_TYPE_FLOAT32;
+ output_format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
+ output_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+ NetFlowElement net_flow_element{};
+
+ // TODO: HRT-9902 - Move nms_info to be an op member instead of NetFlowElement
+ net_flow_element.nms_info = {
+ op_proto.nms_op().classes(),
+ op_proto.nms_op().max_proposals_per_class(),
+ sizeof(hailo_bbox_float32_t),
+ 1, // input_division_factor
+ false,
+ hailo_nms_defuse_info_t()
+ };
+ for (auto &input_pad : op_proto.input_pads()) {
+ CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
+ "NMS op is not connected to core-op");
+ auto output_pad_index = input_to_output_pads.at(input_pad.index());
+ CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
+ "Pad {} of post-process {} is not connected to any core output stream",
+ input_pad.index(), op_proto.name());
+ const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+ net_flow_element.input_streams.insert(op_input_stream.name);
+ }
+ std::shared_ptr<net_flow::Op> post_process_op;
+ switch (op_proto.nms_op().nms_op_case()) {
+ case ProtoHEFNmsOp::kYoloNmsOp: {
+ net_flow_element.name = "YOLO-Post-Process";
+ auto expected_post_process_op = create_yolov5_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
+ CHECK_EXPECTED(expected_post_process_op);
+ post_process_op = expected_post_process_op.release();
+ break;
+ }
+ case ProtoHEFNmsOp::kYoloxNmsOp: {
+ net_flow_element.name = "YOLOX-Post-Process";
+ auto expected_post_process_op = create_yolox_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
+ CHECK_EXPECTED(expected_post_process_op);
+ post_process_op = expected_post_process_op.release();
+ break;
+ }
+ case ProtoHEFNmsOp::kSsdNmsOp: {
+ net_flow_element.name = "SSD-Post-Process";
+ auto expected_post_process_op = create_ssd_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
+ CHECK_EXPECTED(expected_post_process_op);
+ post_process_op = expected_post_process_op.release();
+ break;
+ }
+ case ProtoHEFNmsOp::kIouOp: {
+ // TODO (HRT-8827)
+ // NOTE(review): falls through with post_process_op still null; the
+ // element is pushed below with op == nullptr - confirm consumers
+ // tolerate that until HRT-8827 lands.
+ break;
+ }
+ default: {
+ LOGGER__ERROR("Unsupported Net-Flow NMS-Op");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+ }
+ net_flow_element.op = post_process_op;
+
+ // Fill meta-data output vstream info
+ auto net_group_name = HefUtils::get_network_group_name(network_group_proto, m_supported_features);
+ auto network_name = HailoRTDefaults::get_network_name(net_group_name);
+ hailo_vstream_info_t net_flow_output_vstream_info{};
+ assert(op_proto.output_pads().size() == 1);
+ auto proto_output_pad = op_proto.output_pads()[0];
+ // NOTE(review): strncpy's bound is taken from the *source* length, so a pad
+ // or network name longer than the destination arrays would overflow them -
+ // confirm names are length-validated upstream.
+ strncpy(net_flow_output_vstream_info.name, proto_output_pad.name().c_str(), proto_output_pad.name().length() + 1);
+ strncpy(net_flow_output_vstream_info.network_name, network_name.c_str(), network_name.length() + 1);
+ net_flow_output_vstream_info.direction = HAILO_D2H_STREAM;
+ net_flow_output_vstream_info.format = output_format;
+ net_flow_output_vstream_info.nms_shape.max_bboxes_per_class = op_proto.nms_op().max_proposals_per_class();
+ net_flow_output_vstream_info.nms_shape.number_of_classes = op_proto.nms_op().classes();
+ if (op_proto.nms_op().background_removal()) {
+ // The background class is stripped from the reported class count.
+ net_flow_output_vstream_info.nms_shape.number_of_classes--;
+ net_flow_element.nms_info.number_of_classes--;
+ }
+
+ result.push_back(std::make_shared<NetFlowElement>(net_flow_element));
+
+ // TODO: HRT-9546 - Move vstreams out of core op
+ core_op_metadata.add_output_vstream_info(net_flow_output_vstream_info);
+ break;
+ }
+ default: {
+ LOGGER__ERROR("Unsupported Net-Flow Op");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+ }
+ }
+ return result;
+}
+
+// Looks up the per-arch metadata of a network group and selects the variant
+// matching the given partial-clusters layout bitmap.
+Expected<CoreOpMetadata> Hef::Impl::get_core_op_metadata(const std::string &network_group_name, uint32_t partial_clusters_layout_bitmap)
+{
+ CHECK_AS_EXPECTED(contains(m_core_op_per_arch, network_group_name), HAILO_NOT_FOUND,
+ "Network group with name {} wasn't found", network_group_name);
+ auto per_arch_metadata = m_core_op_per_arch.at(network_group_name);
+ return per_arch_metadata.get_metadata(partial_clusters_layout_bitmap);
+}
+
+// Verifies that the configured core-op created a boundary stream for every input
+// and output stream the HEF describes for this network group. Returns
+// HAILO_INVALID_ARGUMENT when the configure params left any stream uncovered.
+hailo_status Hef::Impl::validate_boundary_streams_were_created(const std::string &network_group_name, std::shared_ptr<CoreOp> core_op)
+{
+ auto number_of_inputs = get_number_of_input_streams(network_group_name);
+ CHECK_EXPECTED_AS_STATUS(number_of_inputs);
+
+ auto size = core_op->get_input_streams().size();
+ CHECK((number_of_inputs.value() == size),
+ HAILO_INVALID_ARGUMENT, "passed configure_params for network group {} did not contain all input streams", network_group_name);
+
+ auto number_of_outputs = get_number_of_output_streams(network_group_name);
+ // Bugfix: this previously re-validated number_of_inputs, so a failure from
+ // get_number_of_output_streams() went unnoticed and .value() below could be
+ // called on an error Expected.
+ CHECK_EXPECTED_AS_STATUS(number_of_outputs);
+ CHECK((number_of_outputs.value() == core_op->get_output_streams().size()),
+ HAILO_INVALID_ARGUMENT, "passed configure_params for network group {} did not contain all output streams", network_group_name);
+
+ return HAILO_SUCCESS;
+}
+
+// Computes the HW-padding values for a stream: the payload of one feature-padding
+// unit and the unpadded bytes per periph buffer. Fails when the format order has
+// no HW-padding support or either value overflows 16 bits.
+hailo_status get_hw_padding_params(hailo_format_order_t format_order, uint32_t width, uint32_t features, uint32_t hw_data_bytes,
+ uint16_t &feature_padding_payload, uint16_t &periph_bytes_per_buffer)
+{
+ // TODO: HRT-3278 dont assume core_buffers_per_frame == height
+ uint32_t payload = 0;
+ uint32_t bytes_per_buffer = 0;
+ switch (format_order)
+ {
+ case HAILO_FORMAT_ORDER_NHCW:
+ case HAILO_FORMAT_ORDER_NHW:
+ // Padding unit spans the width; a buffer covers all features.
+ payload = width * hw_data_bytes;
+ bytes_per_buffer = payload * features;
+ break;
+ case HAILO_FORMAT_ORDER_NHWC:
+ case HAILO_FORMAT_ORDER_FCR:
+ case HAILO_FORMAT_ORDER_F8CR:
+ case HAILO_FORMAT_ORDER_NC:
+ case HAILO_FORMAT_ORDER_BAYER_RGB:
+ case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
+ case HAILO_FORMAT_ORDER_RGB888:
+ // Padding unit spans the features; a buffer covers the whole width.
+ payload = features * hw_data_bytes;
+ bytes_per_buffer = payload * width;
+ break;
+ default:
+ LOGGER__ERROR("unsupported format for HW padding");
+ return HAILO_INTERNAL_FAILURE;
+ }
+
+ CHECK(IS_FIT_IN_UINT16(payload), HAILO_INVALID_HEF,
+ "frame width {} is too big", payload);
+ CHECK(IS_FIT_IN_UINT16(bytes_per_buffer), HAILO_INVALID_HEF,
+ "unpadded bytes per buffer {} is too big", bytes_per_buffer);
+
+ feature_padding_payload = static_cast<uint16_t>(payload);
+ periph_bytes_per_buffer = static_cast<uint16_t>(bytes_per_buffer);
+
+ return HAILO_SUCCESS;
+}
+
+Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(hailo_format_order_t format_order, uint32_t width, uint32_t features,
+ uint32_t hw_data_bytes, uint16_t core_buffers_per_frame, uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr)
+{
+ CONTROL_PROTOCOL__nn_stream_config_t stream_config = {};
+
+ stream_config.core_buffers_per_frame = core_buffers_per_frame;
+ stream_config.core_bytes_per_buffer = core_bytes_per_buffer;
+ stream_config.periph_buffers_per_frame = core_buffers_per_frame; // periph buffers per frame is the same (even if
+ // for hw padding each buffer is smaller).
+
+
+ /* For DDR buffering - core buffers is depended on the amount of buffers per PCIe interrupt. No HW padding required */
+ if (is_ddr) {
+ stream_config.core_buffers_per_frame = 1;
+ stream_config.feature_padding_payload = 0;
+ stream_config.periph_bytes_per_buffer = stream_config.core_bytes_per_buffer;
+ } else {
+ if (hw_padding_supported) {
+ auto status = get_hw_padding_params(format_order, width, features, hw_data_bytes,
+ stream_config.feature_padding_payload, stream_config.periph_bytes_per_buffer);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ } else {
+ stream_config.feature_padding_payload = 0;
+ stream_config.periph_bytes_per_buffer = stream_config.core_bytes_per_buffer;
+ }
+ /* For now, no support for buffer padding */
+ stream_config.buffer_padding_payload = 0;
+ stream_config.buffer_padding = 0;
+ }
+ return stream_config;
+}
+
+Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(const ProtoHEFEdgeLayerBase &edge_layer,
+ bool hw_padding_supported, const ProtoHEFEdgeConnectionType &edge_connection_type)
+{
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(edge_layer.core_bytes_per_buffer()), HAILO_INVALID_HEF,
+ "core_bytes_per_buffer is too big");
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(edge_layer.core_buffers_per_frame()), HAILO_INVALID_HEF,
+ "core_buffers_per_frame is too big");
+
+ auto format_order_exp = HailoRTDefaults::get_device_format_order(edge_layer.format());
+ CHECK_EXPECTED(format_order_exp);
+ auto format_order = format_order_exp.release();
+ auto is_ddr = ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR == edge_connection_type;
+
+ // Width and features only used in case hw_padding is supported. In that case, they represent the HW shape (without padding)
+ return parse_nn_stream_config(format_order, edge_layer.width(), edge_layer.features(),
+ edge_layer.data_bytes(), static_cast<uint16_t>(edge_layer.core_buffers_per_frame()),
+ static_cast<uint16_t>(edge_layer.core_bytes_per_buffer()), hw_padding_supported, is_ddr);
+}
+
+Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(const LayerInfo &edge_layer, bool hw_padding_supported)
+{
+ // TODO HRT-7177 - pass interface to layer info instead of re-calculated Layer info from stream_internal.hpp
+ // After passing stream interface, there is no need for this function. Just use CONTROL_PROTOCOL__nn_stream_config_t from layer info.
+ auto is_ddr = false; // This function is called only on boundary layers, so no DDR
+ return parse_nn_stream_config(edge_layer.format.order, edge_layer.hw_shape.width, edge_layer.hw_shape.features,
+ edge_layer.hw_data_bytes, edge_layer.nn_stream_config.core_buffers_per_frame,
+ edge_layer.nn_stream_config.core_bytes_per_buffer, hw_padding_supported, is_ddr);
+}
+
+bool HefConfigurator::is_hw_padding_supported(bool is_boundary, bool is_mux, hailo_format_order_t format_order,
+ uint16_t core_buffers_per_frame, uint32_t height, uint32_t width, uint32_t features, uint32_t hw_data_bytes)
+{
+ if (!is_boundary || is_mux) {
+ return false;
+ }
+
+ // TODO: HRT-4462 support more orders
+ switch (format_order)
+ {
+ case HAILO_FORMAT_ORDER_NHCW:
+ break;
+ default:
+ LOGGER__DEBUG("HW padding is not supported for format {} ", format_order);
+ return false;
+ }
+
+ if (core_buffers_per_frame != height) {
+ // TODO: HRT-3278
+ LOGGER__DEBUG("HW padding is supported only on layers with core_buffers_per_frame == height");
+ return false;
+ }
+
+ if (((width * features) % 8) != 0) {
+ // TODO: HRT-963 support chunks
+ LOGGER__DEBUG("HW padding is supported only when periph_bytes_per_buffer is a multiple of 8");
+ return false;
+ }
+
+ if ((width * features * hw_data_bytes) >
+ (HAILO8_INBOUND_DATA_STREAM_SIZE - 1)) {
+ // TODO: HRT-4177
+ LOGGER__DEBUG("HW padding is supported only on layers with features * width * data size > stream size");
+ return false;
+ }
+ return true;
+}
+
+bool HefConfigurator::is_hw_padding_supported(const LayerInfo &layer_info)
+{
+ /* If the network is transposed, the width and height are swapped in LayerInfo c'tor, so need to swap it again for calculations */
+ auto height = layer_info.shape.height;
+ auto width = layer_info.shape.width;
+ if (layer_info.format.flags & HAILO_FORMAT_FLAGS_TRANSPOSED) {
+ std::swap(height, width);
+ }
+
+ auto is_boundary = true; // This function is called only on boundary layers
+ return is_hw_padding_supported(is_boundary, layer_info.is_mux, layer_info.format.order,
+ layer_info.nn_stream_config.core_buffers_per_frame, height, width,
+ layer_info.shape.features, layer_info.hw_data_bytes);
+}
+
+bool HefConfigurator::is_hw_padding_supported(const ProtoHEFEdgeLayer &edge_layer)
+{
+ auto is_boundary = (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY == edge_layer.context_switch_info().edge_connection_type());
+ auto is_mux = (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == edge_layer.edge_layer_type());
+ auto edge_layer_base = edge_layer.layer_info().edge_layer_base();
+ auto format_order_exp = HailoRTDefaults::get_device_format_order(edge_layer_base.format());
+ if (!format_order_exp) {
+ LOGGER__DEBUG("Failed to get format order. Not enabling hw padding");
+ return false;
+ }
+
+ if (!IS_FIT_IN_UINT16(edge_layer_base.core_buffers_per_frame())) {
+ LOGGER__DEBUG("Invalid core_buffers_per_frame. Not enabling hw padding");
+ return false;
+ }
+
+ auto format_order = format_order_exp.release();
+ return is_hw_padding_supported(is_boundary, is_mux, format_order, static_cast<uint16_t>(edge_layer_base.core_buffers_per_frame()),
+ edge_layer_base.height(), edge_layer_base.width(), edge_layer_base.features(), edge_layer_base.data_bytes());
+}
+
+Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_input_stream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_input_stream_infos(network_name);
+}
+
+Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_output_stream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_output_stream_infos(network_name);
+}
+
+Expected<std::vector<hailo_stream_info_t>> Hef::Impl::get_all_stream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_all_stream_infos(network_name);
+}
+
+Expected<std::vector<hailo_network_info_t>> Hef::Impl::get_network_infos(const std::string &net_group_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_network_infos();
+}
+
+Expected<hailo_stream_info_t> Hef::Impl::get_stream_info_by_name(const std::string &stream_name,
+ hailo_stream_direction_t stream_direction, const std::string &net_group_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+
+ if (HAILO_H2D_STREAM == stream_direction) {
+ auto stream_infos = network_group_metadata->get_input_stream_infos();
+ CHECK_EXPECTED(stream_infos);
+ for (auto &stream_info : stream_infos.value()) {
+ if (stream_name == stream_info.name) {
+ return std::move(stream_info);
+ }
+ }
+ } else {
+ auto stream_infos = network_group_metadata->get_output_stream_infos();
+ CHECK_EXPECTED(stream_infos);
+ for (auto &stream_info : stream_infos.value()) {
+ if (stream_name == stream_info.name) {
+ return std::move(stream_info);
+ }
+ }
+ }
+
+ return make_unexpected(HAILO_NOT_FOUND);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_input_vstream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_input_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_output_vstream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_output_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> Hef::Impl::get_all_vstream_infos(const std::string &net_group_name,
+ const std::string &network_name)
+{
+ auto network_group_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(network_group_metadata);
+ return network_group_metadata->get_all_vstream_infos(network_name);
+}
+
+const std::vector<ProtoHEFNetworkGroupPtr>& Hef::Impl::network_groups() const
+{
+ return m_groups;
+};
+
+const std::vector<ProtoHEFCoreOpMock>& Hef::Impl::core_ops(const std::string &net_group_name) const
+{
+ assert(contains(m_core_ops_per_group, net_group_name));
+ return m_core_ops_per_group.at(net_group_name);
+};
+
// Returns the post-process (net-flow) ops of the given network group.
// NOTE(review): returns by const *value*, copying the vector of shared_ptrs on every
// call, while the sibling core_ops() returns by const& - aligning them would require
// changing the declaration in the header as well, so it is only flagged here.
const std::vector<std::shared_ptr<NetFlowElement>> Hef::Impl::post_process_ops(const std::string &net_group_name) const
{
    // Presence of the group is a caller precondition, checked only in debug builds.
    assert(contains(m_post_process_ops_per_group, net_group_name));
    return m_post_process_ops_per_group.at(net_group_name);
}
+
+bool Hef::Impl::check_hef_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
+ const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features)
+{
+ if (header.version() > 0) {
+ return std::find_if(hef_extensions.begin(), hef_extensions.end(),
+ [extension] (const ProtoHEFExtension &extended_feature) { return ((ProtoHEFExtensionType)extended_feature.type_index()) == extension; }) != hef_extensions.end();
+ }
+
+ // ProtoHEFIncludedFeature is deprecated
+ switch (extension) {
+ case ProtoHEFExtensionType::ABBALE:
+ return included_features.abbale();
+ case ProtoHEFExtensionType::POSTED_WRITES:
+ return included_features.posted_writes();
+ case ProtoHEFExtensionType::DDR:
+ return included_features.ddr();
+ case ProtoHEFExtensionType::IS_MULTI_CONTEXTS:
+ return included_features.is_multi_context();
+ case ProtoHEFExtensionType::COMPRESSED_PARAMS:
+ return included_features.compressed_params();
+ case ProtoHEFExtensionType::TRANSPOSE_COMPONENT:
+ return included_features.transpose_component();
+ case ProtoHEFExtensionType::PADDED_DDR_BUFFERS:
+ return included_features.padded_ddr_buffers();
+ default:
+ return false;
+ }
+}
+
+bool Hef::Impl::check_hef_optional_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
+ const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions)
+{
+ if (header.version() > 0) {
+ return std::find_if(hef_optional_extensions.begin(), hef_optional_extensions.end(),
+ [extension] (const ProtoHEFOptionalExtension &extended_feature) { return ((ProtoHEFExtensionType)extended_feature.type_index()) == extension; }) != hef_optional_extensions.end();
+ }
+
+ /* optional extensions are only for m_header.version() > 0.
+ For lower version, those features are not supported */
+ return false;
+}
+
// Resolves a user-supplied name into a (network_group_name, network_name) pair.
// An empty name selects the first network group; otherwise the name may match a
// network group, a fully-qualified network name, or the default network name of a group.
// For HAILO8L the group is taken from the first partial network group.
Expected<std::pair<std::string, std::string>> Hef::Impl::get_network_group_and_network_name(const std::string &name)
{
    std::string network_group_name;
    if (name.empty()) {
        // Name is not given - addressing all networks in the first network_group
        network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
            m_groups[0]->partial_network_groups(0).network_group().network_group_metadata().network_group_name()
            : m_groups[0]->network_group_metadata().network_group_name();
        LOGGER__INFO("No name was given. Addressing all networks of default network_group: {}",
            network_group_name);
        auto network_name = HailoRTDefaults::get_network_name(network_group_name);
        return std::make_pair(network_group_name, network_name);
    } else {
        const ProtoHEFNetworkGroup *network_group_ptr = nullptr;
        for (const auto &network_group : m_groups) {
            // TODO: Handle new HEFs
            network_group_ptr = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
                &network_group->partial_network_groups(0).network_group()
                : network_group.get();
            network_group_name = network_group_ptr->network_group_metadata().network_group_name();

            // Look for network_group with the given name
            if (name == network_group_name) {
                auto network_name = HailoRTDefaults::get_network_name(network_group_name);
                return std::make_pair(network_group_name, network_name);
            }
            // Look for network with the given name
            for (const auto &partial_network_name : network_group_ptr->networks_names()) {
                auto full_network_name = HefUtils::get_network_name(network_group_name, partial_network_name);
                if (name == full_network_name) {
                    return std::make_pair(network_group_name, full_network_name);
                }
            }
            // Handle case of default_network_name
            if (name == HailoRTDefaults::get_network_name(network_group_name)) {
                return std::make_pair(network_group_name, name);
            }
        }
    }

    LOGGER__ERROR("Failed to find network or network_group with the name {}",
        name);
    return make_unexpected(HAILO_NOT_FOUND);
}
+
+// TODO: core_ops names?
+Expected<std::shared_ptr<ProtoHEFCoreOpMock>> Hef::Impl::get_core_op_by_net_group_name(const std::string &net_group_name)
+{
+ if ("" == net_group_name) {
+ auto network_group_ptr = m_groups[0];
+ auto network_group_name = HefUtils::get_network_group_name(*network_group_ptr, m_supported_features);
+ LOGGER__INFO("No network_group name was given. Addressing default network_group: {}", network_group_name);
+ const auto &core_op = m_core_ops_per_group[network_group_name][0];
+ if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
+ auto partial_core_op = core_op.partial_core_ops[0];
+ return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
+ }
+ return std::make_shared<ProtoHEFCoreOpMock>(core_op);
+ }
+ CHECK_AS_EXPECTED(contains(m_core_ops_per_group, net_group_name), HAILO_NOT_FOUND,
+ "HEF does not contain network_group with name {}", net_group_name);
+ const auto &core_op = m_core_ops_per_group[net_group_name][0];
+ if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) {
+ auto partial_core_op = core_op.partial_core_ops[0];
+ return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
+ }
+ return std::make_shared<ProtoHEFCoreOpMock>(core_op);
+}
+
+Expected<size_t> Hef::Impl::get_number_of_input_streams(const std::string &net_group_name)
+{
+ auto core_op_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(core_op_metadata);
+
+ auto input_layer_infos = core_op_metadata->get_input_layer_infos();
+ return input_layer_infos.size();
+}
+
+Expected<size_t> Hef::Impl::get_number_of_output_streams(const std::string &net_group_name)
+{
+ auto core_op_metadata = get_core_op_metadata(net_group_name);
+ CHECK_EXPECTED(core_op_metadata);
+
+ auto output_layer_infos = core_op_metadata->get_output_layer_infos();
+ return output_layer_infos.size();
+}
+
+static Expected<LayerType> get_layer_type(const ProtoHEFEdgeConnectionType &edge_connection_type)
+{
+ switch (edge_connection_type) {
+ case PROTO__EDGE_CONNECTION_TYPE__BOUNDARY:
+ return LayerType::BOUNDARY;
+ case PROTO__EDGE_CONNECTION_TYPE__INTERMEDIATE:
+ return LayerType::INTER_CONTEXT;
+ case PROTO__EDGE_CONNECTION_TYPE__DDR:
+ return LayerType::DDR;
+ default:
+ LOGGER__ERROR("Not supported edge connection type {}", edge_connection_type);
+ return make_unexpected(HAILO_INVALID_HEF);
+ }
+}
+
+hailo_status HefUtils::fill_layer_info_with_base_info(const ProtoHEFEdgeLayerBase &base_info,
+ const ProtoHEFEdgeConnectionType &edge_connection_type, const ProtoHEFNetworkGroupMetadata &network_group_proto,
+ bool hw_padding_supported, bool transposed, const uint8_t context_index, const uint8_t network_index,
+ LayerInfo &layer_info)
+{
+ auto format_order_exp = HailoRTDefaults::get_device_format_order(base_info.format());
+ CHECK_EXPECTED_AS_STATUS(format_order_exp);
+
+ auto format_oder = format_order_exp.release();
+
+ auto layer_type = get_layer_type(edge_connection_type);
+ CHECK_EXPECTED_AS_STATUS(layer_type);
+ layer_info.type = layer_type.value();
+
+ if (HEF__FORMAT__NMS != base_info.format()) {
+ layer_info.shape.height = base_info.height();
+ layer_info.shape.width = base_info.width();
+ layer_info.shape.features = base_info.features();
+ } else {
+ layer_info.shape.height = static_cast<uint32_t>(base_info.additional_info().nms_info().number_of_classes());
+ layer_info.shape.width = HailoRTCommon::BBOX_PARAMS;
+ layer_info.shape.features = static_cast<uint32_t>(base_info.additional_info().nms_info().max_output_size() *
+ base_info.additional_info().nms_info().input_division_factor());
+ }
+ if (hw_padding_supported) {
+ layer_info.hw_shape.height = base_info.height();
+ layer_info.hw_shape.width = base_info.width();
+ layer_info.hw_shape.features = base_info.features();
+ }
+ else {
+ layer_info.hw_shape.height = base_info.padded_height();
+ layer_info.hw_shape.width = base_info.padded_width();
+ layer_info.hw_shape.features = base_info.padded_features();
+ }
+ layer_info.hw_data_bytes = base_info.data_bytes();
+
+ // TODO: remove duplications with stream info parse
+ layer_info.format.order = format_oder;
+ layer_info.format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+
+ // The check network_group_proto.transposed_net() is for supporting backward compatability for old hefs
+ if ((network_group_proto.transposed_net() || transposed) && (layer_info.format.order != HAILO_FORMAT_ORDER_NC)) {
+ std::swap(layer_info.shape.height, layer_info.shape.width);
+ layer_info.format.flags |= HAILO_FORMAT_FLAGS_TRANSPOSED;
+ }
+
+ if (base_info.host_argmax()) {
+ layer_info.format.flags |= HAILO_FORMAT_FLAGS_HOST_ARGMAX;
+ layer_info.shape.features = 1;
+ }
+
+ auto type = HailoRTCommon::get_format_type(layer_info.hw_data_bytes);
+ CHECK_EXPECTED_AS_STATUS(type);
+ layer_info.format.type = type.value();
+
+ auto nn_stream_config = HefConfigurator::parse_nn_stream_config(base_info, hw_padding_supported,
+ edge_connection_type);
+ CHECK_EXPECTED_AS_STATUS(nn_stream_config, "Failed parse nn stream config");
+ layer_info.nn_stream_config = nn_stream_config.release();
+ layer_info.network_index = network_index;
+ layer_info.context_index = context_index;
+
+ CHECK(IS_FIT_IN_UINT8(base_info.sys_index()), HAILO_INVALID_HEF,
+ "Failed to parse HEF. Invalid sys_index: {}.", base_info.sys_index());
+ layer_info.stream_index = static_cast<uint8_t>(base_info.sys_index());
+ CHECK(IS_FIT_IN_UINT8(base_info.engine_id()), HAILO_INVALID_HEF,
+ "Failed to parse HEF. Invalid engine_id: {}.", base_info.engine_id());
+ layer_info.dma_engine_index = static_cast<uint8_t>(base_info.engine_id());
+
+ if (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) {
+ auto expected_nms_info = parse_proto_nms_info(base_info.additional_info().nms_info());
+ CHECK_EXPECTED_AS_STATUS(expected_nms_info);
+ layer_info.nms_info = expected_nms_info.release();
+ }
+
+ layer_info.max_shmifo_size = base_info.max_shmifo_size();
+
+ return HAILO_SUCCESS;
+}
+
// Fills a complete LayerInfo for a regular (non-mux) edge layer: base info, names,
// quantization info, simulation buffer indices and - for defused-NMS outputs - the
// matching fused layer's info. Returns HAILO_SUCCESS or a parse/validation error.
hailo_status HefUtils::fill_layer_info(const ProtoHEFEdgeLayerInfo &info,
    const ProtoHEFEdgeConnectionType &edge_connection_type,
    const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
    bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
    uint8_t network_index, LayerInfo &layer_info)
{
    auto status = fill_layer_info_with_base_info(info.edge_layer_base(), edge_connection_type, core_op.network_group_metadata,
        hw_padding_supported, info.transposed(), context_index, network_index, layer_info);
    CHECK_SUCCESS(status);

    // Both names are later copied into fixed-size C arrays, hence the +1 for the NUL terminator.
    if (HAILO_MAX_STREAM_NAME_SIZE < (info.name().length() + 1)) {
        LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.name());
        return HAILO_INTERNAL_FAILURE;
    }
    if (HAILO_MAX_NETWORK_NAME_SIZE < (partial_network_name.length() + 1)) {
        LOGGER__ERROR("The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", partial_network_name);
        return HAILO_INTERNAL_FAILURE;
    }
    layer_info.name = info.name();

    layer_info.network_name = HefUtils::get_network_name(core_op, partial_network_name);
    layer_info.is_mux = false;
    layer_info.direction = direction;
    layer_info.quant_info.limvals_max = info.numeric_info().limvals_max();
    layer_info.quant_info.limvals_min = info.numeric_info().limvals_min();
    layer_info.quant_info.qp_scale = info.numeric_info().qp_scale();
    layer_info.quant_info.qp_zp = info.numeric_info().qp_zp();
    // Simulation info
    assert (1 == info.edge_layer_base().buffer_indices_size());
    layer_info.buffer_indices.cluster_index = info.edge_layer_base().buffer_indices(0).cluster_index();
    layer_info.buffer_indices.index = info.edge_layer_base().buffer_indices(0).index();

    // A layer is a defused-NMS output only when the network has fused layers AND this
    // layer's own NMS info says it was defused.
    layer_info.is_defused_nms = core_op.fused_layers_metadata.network_has_fused_layers() &&
        (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) && layer_info.nms_info.is_defused;

    if (layer_info.is_defused_nms) {
        for (const auto &fused_layer : core_op.fused_layers_metadata.fused_layers()) {
            if (fused_layer.layer_info().name() == layer_info.nms_info.defuse_info.original_name) {
                // This creates a new LayerInfo for the fused layer *for each defused layer*, even though they all share the same fused layer.
                // TODO Make it so all defused layer reference the same LayerInfo of the fused layer.
                LayerInfo fused_layer_info = {};
                status = fill_fused_nms_info(fused_layer, fused_layer_info, layer_info.quant_info, layer_info.network_name);
                CHECK_SUCCESS(status);
                layer_info.fused_nms_layer.push_back(fused_layer_info);
                break;
            }
        }
        CHECK(0 != layer_info.fused_nms_layer.size(), HAILO_NOT_FOUND, "Could not find the fused layer {}", layer_info.nms_info.defuse_info.original_name);
    }

    return HAILO_SUCCESS;
}
+
// Fills a LayerInfo describing the *fused* NMS layer referenced by a defused-NMS
// output layer: format, NMS-encoded shape, names and simulation indices.
// The quant info is taken from the defused layer (see note below).
hailo_status HefUtils::fill_fused_nms_info(const ProtoHEFEdgeLayerFused &info, LayerInfo &layer_info,
    hailo_quant_info_t &defuse_quant_info, const std::string &network_name)
{
    auto base_info = info.layer_info().edge_layer_base();
    auto format_order_exp = HailoRTDefaults::get_device_format_order(base_info.format());
    CHECK_EXPECTED_AS_STATUS(format_order_exp);
    layer_info.format.order = format_order_exp.release();
    layer_info.format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;

    // NMS layers encode (classes, bbox params, max detections) in the shape fields.
    layer_info.shape.height = static_cast<uint32_t>(info.nms_info().number_of_classes());
    layer_info.shape.width = HailoRTCommon::BBOX_PARAMS;
    layer_info.shape.features = static_cast<uint32_t>(info.nms_info().max_output_size() *
        info.nms_info().input_division_factor());

    layer_info.hw_data_bytes = base_info.data_bytes();

    auto type = HailoRTCommon::get_format_type(layer_info.hw_data_bytes);
    CHECK_EXPECTED_AS_STATUS(type);
    layer_info.format.type = type.value();

    auto expected_nms_info = parse_proto_nms_info(info.nms_info());
    CHECK_EXPECTED_AS_STATUS(expected_nms_info);
    layer_info.nms_info = expected_nms_info.release();

    // Name is later copied into a fixed-size C array, hence the +1 for the NUL terminator.
    if (HAILO_MAX_STREAM_NAME_SIZE < (info.layer_info().name().length() + 1)) {
        LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.layer_info().name());
        return HAILO_INTERNAL_FAILURE;
    }
    layer_info.name = info.layer_info().name();
    layer_info.network_name = network_name;
    layer_info.is_mux = false;
    layer_info.direction = HAILO_D2H_STREAM;
    // Due to bug in SDK quant info of fused layer is empty, so we use the quant info of the defused layer
    layer_info.quant_info = defuse_quant_info;

    // Simulation info
    assert (1 == info.layer_info().edge_layer_base().buffer_indices_size());
    layer_info.buffer_indices.cluster_index = info.layer_info().edge_layer_base().buffer_indices(0).cluster_index();
    layer_info.buffer_indices.index = info.layer_info().edge_layer_base().buffer_indices(0).index();

    return HAILO_SUCCESS;
}
+
// Fills a LayerInfo for a mux edge layer, recursing into its predecessors (which may
// themselves be regular layers or nested muxes). Returns HAILO_SUCCESS or an error
// from base-info parsing / name validation / predecessor filling.
hailo_status HefUtils::fill_mux_info(const ProtoHEFEdgeLayerMux &info,
    const ProtoHEFEdgeConnectionType &edge_connection_type,
    const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
    bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
    uint8_t network_index, LayerInfo &layer_info)
{
    // Mux layers are never transposed at this level.
    const bool transposed = false;
    auto status = fill_layer_info_with_base_info(info.edge_layer_base(), edge_connection_type, core_op.network_group_metadata,
        hw_padding_supported, transposed, context_index, network_index, layer_info);
    CHECK_SUCCESS(status);

    // Both names are later copied into fixed-size C arrays, hence the +1 for the NUL terminator.
    if (HAILO_MAX_STREAM_NAME_SIZE < (info.name().length() + 1)) {
        LOGGER__ERROR("The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.name());
        return HAILO_INTERNAL_FAILURE;
    }
    if (HAILO_MAX_NETWORK_NAME_SIZE < (partial_network_name.length() + 1)) {
        LOGGER__ERROR("The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", partial_network_name);
        return HAILO_INTERNAL_FAILURE;
    }
    layer_info.name = info.name();

    layer_info.network_name = HefUtils::get_network_name(core_op, partial_network_name);
    layer_info.is_mux = true;
    layer_info.predecessor.reserve(info.mux_data().number_of_predecessors());
    layer_info.height_gcd = info.mux_data().height_gcd();
    layer_info.height_ratios.reserve(info.mux_data().height_ratios_list_len());
    for (const auto &height_ratio : info.mux_data().height_ratios_list()) {
        layer_info.height_ratios.emplace_back(height_ratio);
    }
    // Simulation info
    assert (1 == info.edge_layer_base().buffer_indices_size());
    layer_info.buffer_indices.cluster_index = info.edge_layer_base().buffer_indices(0).cluster_index();
    layer_info.buffer_indices.index = info.edge_layer_base().buffer_indices(0).index();

    // Recursively fill each predecessor; a predecessor is either a plain layer or a nested mux.
    for (uint32_t i = 0; i < info.mux_data().number_of_predecessors(); i++) {
        LayerInfo temp_layer = {};
        switch (info.predecessors(i).edge_case()) {
            case ProtoHefEdge::kLayerInfo:
                status = fill_layer_info(info.predecessors(i).layer_info(), edge_connection_type, core_op,
                    direction, hw_padding_supported, context_index, partial_network_name, network_index, temp_layer);
                if (HAILO_SUCCESS != status) {
                    return status;
                }
                layer_info.predecessor.push_back(temp_layer);
                break;
            case ProtoHefEdge::kLayerMux:
                status = fill_mux_info(info.predecessors(i).layer_mux(), edge_connection_type, core_op,
                    direction, hw_padding_supported, context_index, partial_network_name, network_index, temp_layer);
                if (HAILO_SUCCESS != status) {
                    return status;
                }
                layer_info.predecessor.push_back(temp_layer);
                break;
            default:
                LOGGER__ERROR("Invalid layer type");
                return HAILO_INTERNAL_FAILURE;
                break;
        }
    }

    return HAILO_SUCCESS;
}
+
+hailo_status HefUtils::fill_boundary_layers_info(
+ const ProtoHEFCoreOpMock &core_op,
+ const uint8_t context_index,
+ const ProtoHEFEdgeLayer &layer,
+ const SupportedFeatures &supported_features,
+ ContextMetadata &context_metadata)
+{
+ auto layer_info = get_boundary_layer_info(core_op, context_index, layer, supported_features);
+ CHECK_EXPECTED_AS_STATUS(layer_info);
+
+ context_metadata.add_boundary_layer(layer_info.release());
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status HefUtils::fill_inter_context_layers_info(
+ const ProtoHEFCoreOpMock &core_op,
+ const uint8_t context_index,
+ const ProtoHEFEdgeLayer &layer,
+ const SupportedFeatures &supported_features,
+ ContextMetadata &context_metadata)
+{
+ auto layer_info = get_inter_context_layer_info(core_op, context_index, layer, supported_features);
+ CHECK_EXPECTED_AS_STATUS(layer_info);
+
+ context_metadata.add_inter_context_layer(layer_info.release());
+ return HAILO_SUCCESS;
+}
+
+hailo_status HefUtils::fill_ddr_layers_info(
+ const ProtoHEFCoreOpMock &core_op,
+ const uint8_t context_index,
+ const ProtoHEFEdgeLayer &layer,
+ const SupportedFeatures &supported_features,
+ ContextMetadata &context_metadata)
+{
+ auto layer_info = get_ddr_layer_info(core_op, context_index, layer, supported_features);
+ CHECK_EXPECTED_AS_STATUS(layer_info);
+
+ context_metadata.add_ddr_layer(layer_info.release());
+ return HAILO_SUCCESS;
+}
+
+hailo_status HefUtils::check_ddr_pairs_match(
+ const std::vector<LayerInfo> &context_ddr_input_layers,
+ const std::vector<LayerInfo> &context_ddr_output_layers,
+ const uint8_t context_index)
+{
+ CHECK(context_ddr_input_layers.size() == context_ddr_output_layers.size(), HAILO_INVALID_HEF,
+ "DDR pairs must be equal in size for context {}" ,context_index);
+
+ for (auto const &ddr_output_layer : context_ddr_output_layers) {
+ auto matching_input_stream = ddr_output_layer.connected_context_info.stream_index;
+ bool found_mathing_layer = false;
+ for (auto const &ddr_input_layer : context_ddr_input_layers) {
+ if (ddr_input_layer.stream_index == matching_input_stream) {
+ CHECK(!found_mathing_layer, HAILO_INVALID_HEF, "Found multiple input DDR streams for single ddr output stream");
+ found_mathing_layer = true;
+ CHECK(ddr_output_layer.nn_stream_config.core_bytes_per_buffer == ddr_input_layer.nn_stream_config.core_bytes_per_buffer,
+ HAILO_INVALID_HEF, "both sides for DDR pair must have the same core_bytes_per_buffer.\n"
+ "context index {}. Output stream index - {} output side core_bytes_per_buffer - {}."
+ "input stream index {}.input size core_bytes_per_buffer - {}",
+ context_index, ddr_output_layer.stream_index, ddr_output_layer.nn_stream_config.core_bytes_per_buffer,
+ ddr_input_layer.stream_index, ddr_input_layer.nn_stream_config.core_bytes_per_buffer);
+ CHECK(ddr_output_layer.ddr_info.total_buffers_per_frame == ddr_input_layer.ddr_info.total_buffers_per_frame,
+ HAILO_INVALID_HEF, "both sides for DDR pair must have the same total_buffers_per_frame.\n"
+ "context index {}. Output stream index - {} output side total_buffers_per_frame - {}."
+ "input stream index {}. input size total_buffers_per_frame - {}",
+ context_index, ddr_output_layer.stream_index, ddr_output_layer.ddr_info.total_buffers_per_frame,
+ ddr_input_layer.stream_index, ddr_input_layer.ddr_info.total_buffers_per_frame);
+ }
+ }
+ CHECK(found_mathing_layer, HAILO_INVALID_HEF, "didn't find any match for context {} output stream {}", context_index, ddr_output_layer.stream_index);
+ }
+
+ return HAILO_SUCCESS;
+}
+
// Converts a single ProtoHEFTrigger into the matching context-switch "wait" action.
// Every index taken from the proto is range-checked before being narrowed to the
// width the firmware expects (uint8_t).
// Returns:
//   HAILO_INVALID_HEF     - an index is out of range, or the trigger type is unknown.
//   HAILO_NOT_IMPLEMENTED - kTriggerAllDataWasReceived (not supported).
static Expected<ContextSwitchConfigActionPtr> parse_trigger_action(const ProtoHEFTrigger &trigger_proto)
{
    switch (trigger_proto.trigger_case()) {
    case ProtoHEFTrigger::kTriggerLcu:
    {
        const auto cluster_index = trigger_proto.trigger_lcu().cluster_index();
        const auto lcu_index = trigger_proto.trigger_lcu().lcu_index();
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(cluster_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid cluster_index: {}.", cluster_index);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(lcu_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid lcu_index: {}.", lcu_index);
        return WaitForLcuAction::create(static_cast<uint8_t>(cluster_index), static_cast<uint8_t>(lcu_index));
    }
    case ProtoHEFTrigger::kTriggerAllDataWasSent:
    {
        const auto stream_index = trigger_proto.trigger_all_data_was_sent().shmifo_index();
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(stream_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid stream_index: {}.", stream_index);
        return WaitOutputTransferDoneAction::create(static_cast<uint8_t>(stream_index));
    }
    case ProtoHEFTrigger::kTriggerDmaIdle:
    {
        const auto stream_index = trigger_proto.trigger_dma_idle().shmifo_index();
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(stream_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid stream_index: {}.", stream_index);
        return WaitDmaIdleAction::create(static_cast<uint8_t>(stream_index));
    }
    case ProtoHEFTrigger::kTriggerNms:
    {
        // All six NMS routing indices are validated before narrowing.
        const auto aggregator_index = trigger_proto.trigger_nms().aggregator_index();
        const auto pred_cluster_ob_index = trigger_proto.trigger_nms().pred_cluster_ob_index();
        const auto pred_cluster_ob_cluster_index = trigger_proto.trigger_nms().pred_cluster_ob_cluster_index();
        const auto pred_cluster_ob_interface = trigger_proto.trigger_nms().pred_cluster_ob_interface();
        const auto succ_prepost_ob_index = trigger_proto.trigger_nms().succ_prepost_ob_index();
        const auto succ_prepost_ob_interface = trigger_proto.trigger_nms().succ_prepost_ob_interface();
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(aggregator_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid aggregator_index: {}.", aggregator_index);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid pred_cluster_ob_index: {}.", pred_cluster_ob_index);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_cluster_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid pred_cluster_ob_cluster_index: {}.", pred_cluster_ob_cluster_index);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(pred_cluster_ob_interface), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid pred_cluster_ob_interface: {}.", pred_cluster_ob_interface);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(succ_prepost_ob_index), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid succ_prepost_ob_index: {}.", succ_prepost_ob_index);
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(succ_prepost_ob_interface), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid succ_prepost_ob_interface: {}.", succ_prepost_ob_interface);

        return WaitNmsIdleAction::create(static_cast<uint8_t>(aggregator_index),
            static_cast<uint8_t>(pred_cluster_ob_index), static_cast<uint8_t>(pred_cluster_ob_cluster_index),
            static_cast<uint8_t>(pred_cluster_ob_interface), static_cast<uint8_t>(succ_prepost_ob_index),
            static_cast<uint8_t>(succ_prepost_ob_interface));
    }
    case ProtoHEFTrigger::kTriggerAllDataWasReceived:
    {
        LOGGER__ERROR("kTriggerAllDataWasReceived trigger is not supported");
        return make_unexpected(HAILO_NOT_IMPLEMENTED);
    }
    case ProtoHEFTrigger::kTriggerNone:
    {
        // A "none" trigger still emits an action so operation boundaries are kept.
        return NoneAction::create();
    }
    default:
        LOGGER__ERROR("Unsupported trigger given {}", trigger_proto.trigger_case());
        return make_unexpected(HAILO_INVALID_HEF);
    }
}
+
// Parse initial_l3 register from old hef
constexpr uint32_t HAILO8_INITIAL_L3_CUT_MASK = 0x0000007F;
constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_MASK = 0x0007FF80L;
constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_SHIFT = 7;
constexpr uint32_t HAILO8_INITIAL_L3_OFFSET_BYTES_GRANULARITY_SHIFT = 3;


// Decodes the legacy (Hailo8) initial_l3 register value into its two fields:
//   bits [6:0]  - initial_l3_cut (7 bits)
//   bits [18:7] - initial_l3_offset, stored in 8-byte granularity (12 bits)
// Returns {cut, offset-in-bytes}.
static std::pair<uint8_t, uint16_t> old_hef_parse_initial_l3(uint32_t initial_l3)
{
    const auto cut = static_cast<uint8_t>(initial_l3 & HAILO8_INITIAL_L3_CUT_MASK);
    uint32_t offset_in_8_byte_units = (initial_l3 & HAILO8_INITIAL_L3_OFFSET_MASK);
    offset_in_8_byte_units >>= HAILO8_INITIAL_L3_OFFSET_SHIFT;
    // Convert from 8-byte units to bytes.
    const auto offset_bytes =
        static_cast<uint16_t>(offset_in_8_byte_units << HAILO8_INITIAL_L3_OFFSET_BYTES_GRANULARITY_SHIFT);
    return std::make_pair(cut, offset_bytes);
}
+
// Converts a single (non write-ccw) ProtoHEFAction into the corresponding
// context-switch action. All proto indices are range-checked before being
// narrowed to the fixed-width types the actions expect.
// Returns HAILO_INVALID_HEF for out-of-range fields and HAILO_INTERNAL_FAILURE
// for action types not handled here.
static Expected<ContextSwitchConfigActionPtr> parse_action(const ProtoHEFAction &proto_action,
    const SupportedFeatures &supported_features)
{
    switch (proto_action.action_case()) {
    case ProtoHEFAction::kDisableLcu:
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.disable_lcu().cluster_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.disable_lcu().cluster_index());
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.disable_lcu().lcu_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid lcu_index: {}", proto_action.disable_lcu().lcu_index());
        return DisableLcuAction::create(static_cast<uint8_t>(proto_action.disable_lcu().cluster_index()),
            static_cast<uint8_t>(proto_action.disable_lcu().lcu_index()));
    case ProtoHEFAction::kEnableLcu:
    {
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_lcu().cluster_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.enable_lcu().cluster_index());
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_lcu().lcu_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid lcu_index: {}.", proto_action.enable_lcu().lcu_index());
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(proto_action.enable_lcu().lcu_kernel_done_address()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid lcu_kernel_done_address: {}.", proto_action.enable_lcu().lcu_kernel_done_address());

        // Without multi-network support the whole core op acts as network 0.
        auto support_multi_networks = supported_features.multi_network_support;
        auto network_index = static_cast<uint8_t>((support_multi_networks) ? proto_action.enable_lcu().network_index() : 0);

        const auto cluster_index = static_cast<uint8_t>(proto_action.enable_lcu().cluster_index());
        const auto lcu_index = static_cast<uint8_t>(proto_action.enable_lcu().lcu_index());
        const auto kernel_done_address = static_cast<uint16_t>(proto_action.enable_lcu().lcu_kernel_done_address());
        const auto kernel_done_count = static_cast<uint32_t>(proto_action.enable_lcu().lcu_kernel_done_count());

        return EnableLcuAction::create(cluster_index, lcu_index, network_index, kernel_done_address,
            kernel_done_count);
    }
    case ProtoHEFAction::kEnableSequencer:
    {
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_sequencer().cluster_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.enable_sequencer().cluster_index());

        // TODO: Remove when implemented in the hef.proto
        // The four 32-bit l2_write fields are packed into two 64-bit offsets.
        uint64_t l2_offset_0 = 0;
        uint64_t l2_offset_1 = 0;
        // TODO: Change the CONTEXT_SWITCH__add_enable_sequencer_proto_action func to receive 4 'l2_offset' params
        l2_offset_0 |= (uint64_t)(proto_action.enable_sequencer().l2_write_0());
        l2_offset_0 |= ((uint64_t)(proto_action.enable_sequencer().l2_write_1()) << 32);
        l2_offset_1 |= (uint64_t)(proto_action.enable_sequencer().l2_write_2());
        l2_offset_1 |= ((uint64_t)(proto_action.enable_sequencer().l2_write_3()) << 32);

        // New HEFs carry the initial L3 info explicitly; old ones pack it into
        // a single legacy register value that must be decoded.
        uint8_t initial_l3_cut = 0;
        uint16_t initial_l3_offset = 0;
        if (proto_action.enable_sequencer().initial_l3_info().includes_initial_l3_info()) {
            const auto &initial_l3_info = proto_action.enable_sequencer().initial_l3_info();
            CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(initial_l3_info.initial_l3_index()), HAILO_INVALID_HEF,
                "Initial l3 cut {} is out of range", initial_l3_info.initial_l3_index());
            CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(initial_l3_info.initial_l3_offset()), HAILO_INVALID_HEF,
                "Initial l3 offset {} is out of range", initial_l3_info.initial_l3_offset());
            initial_l3_cut = static_cast<uint8_t>(initial_l3_info.initial_l3_index());
            initial_l3_offset = static_cast<uint16_t>(initial_l3_info.initial_l3_offset());
        }
        else {
            // Legacy mode should work only on hailo8
            std::tie(initial_l3_cut, initial_l3_offset) = old_hef_parse_initial_l3(proto_action.enable_sequencer().initial_l3_legacy());
        }

        return EnableSequencerAction::create(
            static_cast<uint8_t>(proto_action.enable_sequencer().cluster_index()),
            initial_l3_cut, initial_l3_offset,
            proto_action.enable_sequencer().active_apu_bitmap(),
            proto_action.enable_sequencer().active_ia_bitmap(),
            proto_action.enable_sequencer().active_sc_bitmap(),
            proto_action.enable_sequencer().active_l2_bitmap(),
            l2_offset_0,
            l2_offset_1);
    }
    case ProtoHEFAction::kNone:
        return NoneAction::create();

    case ProtoHEFAction::kWaitForSeqeuncer:
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.wait_for_seqeuncer().cluster_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid cluster_index: {}.", proto_action.wait_for_seqeuncer().cluster_index());

        return WaitForSequencerAction::create(
            static_cast<uint8_t>(proto_action.wait_for_seqeuncer().cluster_index()));

    case ProtoHEFAction::kAllowInputDataflow:
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.allow_input_dataflow().sys_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid sys_index: {}.", proto_action.allow_input_dataflow().sys_index());
        return AllowInputDataflowAction::create(
            static_cast<uint8_t>(proto_action.allow_input_dataflow().sys_index()));

    case ProtoHEFAction::kWaitForModuleConfigDone:
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.wait_for_module_config_done().index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid index: {}", proto_action.wait_for_module_config_done().index());
        return WaitForModuleConfigDoneAction::create(
            static_cast<uint8_t>(proto_action.wait_for_module_config_done().index()));

    case ProtoHEFAction::kEnableNms:
    {
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_nms().nms_unit_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid nms_unit_index: {}.", proto_action.enable_nms().nms_unit_index());
        CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_nms().network_index()), HAILO_INVALID_HEF,
            "Failed to parse HEF. Invalid network_index: {}.", proto_action.enable_nms().network_index());

        // Without multi-network support the whole core op acts as network 0.
        auto support_multi_networks = supported_features.multi_network_support;
        auto network_index = static_cast<uint8_t>((support_multi_networks) ? proto_action.enable_nms().network_index() : 0);

        const auto nms_unit_index = static_cast<uint8_t>(proto_action.enable_nms().nms_unit_index());

        return EnableNmsAction::create(nms_unit_index, network_index);
    }

    default:
        // Note: kWriteDataCcw is handled separately by merge_write_ccw_actions,
        // so reaching here with it (or any unknown case) is an internal error.
        LOGGER__ERROR("Action {} not implemented", proto_action.action_case());
        break;
    }

    // Default case
    return make_unexpected(HAILO_INTERNAL_FAILURE);
}
+
+static Expected<Buffer> build_config_buffer(const std::vector<MemoryView> &ccw_buffers)
+{
+ size_t buffer_size = 0;
+ for (const auto &ccw_buffer : ccw_buffers) {
+ buffer_size += ccw_buffer.size();
+ }
+
+ auto config_buffer = Buffer::create(buffer_size);
+ CHECK_EXPECTED(config_buffer);
+
+ size_t current_offset = 0;
+ for (const auto &ccw_buffer : ccw_buffers) {
+ assert(current_offset + ccw_buffer.size() <= config_buffer->size());
+ memcpy(config_buffer->data() + current_offset, ccw_buffer.data(), ccw_buffer.size());
+ current_offset += ccw_buffer.size();
+ }
+
+ return config_buffer.release();
+}
+
+static hailo_status merge_write_ccw_actions(
+ std::vector<ContextSwitchConfigActionPtr> &actions,
+ ConfigBufferInfoMap &config_buffer_infos,
+ const std::vector<const ProtoHEFActionWriteDataCcw *> &write_ccw_actions)
+{
+ // Map between config stream index and vector of config buffers.
+ std::map<uint8_t, std::vector<MemoryView>> ccw_buffers_per_config_streams;
+ for (const auto *write_ccw_action : write_ccw_actions) {
+ CHECK(IS_FIT_IN_UINT8(write_ccw_action->cfg_channel_index()), HAILO_INVALID_HEF,
+ "Invalid cfg channel index");
+ const auto config_stream_index = static_cast<uint8_t>(write_ccw_action->cfg_channel_index());
+ const auto write_ccw_buffer = MemoryView::create_const(write_ccw_action->data().data(), write_ccw_action->data().size());
+ ccw_buffers_per_config_streams[config_stream_index].emplace_back(write_ccw_buffer);
+ }
+
+ for (const auto &ccw_buffers_per_config_stream : ccw_buffers_per_config_streams) {
+ const auto config_stream_index = ccw_buffers_per_config_stream.first;
+ const auto &ccw_buffers = ccw_buffers_per_config_stream.second;
+ auto config_buffer = build_config_buffer(ccw_buffers);
+ CHECK_EXPECTED_AS_STATUS(config_buffer);
+
+ assert(config_buffer->size() < std::numeric_limits<uint32_t>::max());
+ config_buffer_infos[config_stream_index].emplace_back(static_cast<uint32_t>(config_buffer->size()));
+
+ const size_t total_ccw_burst = ccw_buffers.size();
+ auto action = WriteDataCcwAction::create(config_buffer.release(), config_stream_index, total_ccw_burst);
+
+ CHECK_EXPECTED_AS_STATUS(action);
+ actions.emplace_back(action.release());
+ }
+
+ return HAILO_SUCCESS;
+}
+
// Parses one ProtoHEFOperation: first its trigger (emitted as a wait action),
// then its actions in order. Runs of consecutive kWriteDataCcw actions are
// accumulated and merged into one WriteDataCcwAction per config channel (see
// merge_write_ccw_actions); every other action is translated one-to-one via
// parse_action.
static hailo_status parse_operation(std::vector<ContextSwitchConfigActionPtr> &actions,
    ConfigBufferInfoMap &config_buffer_infos,
    const ProtoHEFOperation &operation_proto,
    const SupportedFeatures &supported_features)
{
    auto trigger_action = parse_trigger_action(operation_proto.trigger());
    CHECK_EXPECTED_AS_STATUS(trigger_action);
    actions.emplace_back(trigger_action.release());

    // If current_write_ccw_actions isn't empty, means we currently parsing a group of consecutive write ccw actions.
    // we will merge those actions into one write ccw per config channel.
    std::vector<const ProtoHEFActionWriteDataCcw*> current_write_ccw_actions;

    for (int action_index = 0; action_index < operation_proto.actions_size(); action_index++) {
        const auto &proto_action = operation_proto.actions(action_index);
        if (proto_action.action_case() == ProtoHEFAction::kWriteDataCcw) {
            // Keep in vector, parse later
            current_write_ccw_actions.push_back(&proto_action.write_data_ccw());

            // Flush the accumulated run when this is its last ccw - i.e. the
            // operation ends here, or the next action is not a write-ccw.
            const auto next_action_index = action_index + 1;
            const bool is_last_ccw =
                (next_action_index == operation_proto.actions_size()) ||
                (operation_proto.actions(next_action_index).action_case() != ProtoHEFAction::kWriteDataCcw);
            if (is_last_ccw) {
                auto status = merge_write_ccw_actions(actions, config_buffer_infos, current_write_ccw_actions);
                CHECK_SUCCESS(status);
                current_write_ccw_actions.clear();
            }
        } else {
            auto action = parse_action(proto_action, supported_features);
            CHECK_EXPECTED_AS_STATUS(action);
            actions.emplace_back(action.release());
        }
    }
    // Every ccw run is flushed as soon as it ends, so nothing may be left over.
    assert(current_write_ccw_actions.empty());

    return HAILO_SUCCESS;
}
+
+static Expected<ContextMetadata> parse_operations(
+ const google::protobuf::RepeatedPtrField<ProtoHEFOperation> &operations_proto,
+ const SupportedFeatures &supported_features)
+{
+ std::vector<ContextSwitchConfigActionPtr> actions;
+ ConfigBufferInfoMap config_buffer_infos;
+
+ for (const auto &operation_proto : operations_proto) {
+ auto status = parse_operation(actions, config_buffer_infos, operation_proto, supported_features);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ }
+
+ return ContextMetadata(std::move(actions), std::move(config_buffer_infos));
+}
+
// Parses the preliminary configuration of a core op. Only its operations are
// translated here (no edge-layer metadata is processed for the preliminary
// context).
Expected<ContextMetadata> HefUtils::parse_preliminary_context(const ProtoHEFPreliminaryConfig &preliminary_proto,
    const SupportedFeatures &supported_features)
{
    return parse_operations(preliminary_proto.operation(), supported_features);
}
+
+Expected<ContextMetadata> HefUtils::parse_single_dynamic_context(const ProtoHEFCoreOpMock &core_op,
+ const ProtoHEFContext &context_proto, uint8_t context_index, const SupportedFeatures &supported_features)
+{
+ auto context_metadata_exp = parse_operations(context_proto.operations(), supported_features);
+ CHECK_EXPECTED(context_metadata_exp);
+ ContextMetadata context_metadata = context_metadata_exp.release();
+
+ for (const auto &edge_layer : context_proto.metadata().edge_layers()) {
+ if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+ edge_layer.context_switch_info().edge_connection_type()) {
+ auto status = fill_boundary_layers_info(core_op, context_index, edge_layer,
+ supported_features, context_metadata);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ } else if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__INTERMEDIATE ==
+ edge_layer.context_switch_info().edge_connection_type()) {
+ auto status = fill_inter_context_layers_info(core_op, context_index, edge_layer,
+ supported_features, context_metadata);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ } else if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR ==
+ edge_layer.context_switch_info().edge_connection_type()) {
+ auto status = fill_ddr_layers_info(core_op, context_index, edge_layer,
+ supported_features, context_metadata);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ }
+ }
+
+ auto status = check_ddr_pairs_match(context_metadata.get_ddr_input_layers(), context_metadata.get_ddr_output_layers(),
+ context_index);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return context_metadata;
+}
+
+static hailo_status validate_unique_boundary_names(const std::vector<ContextMetadata> &contexts_metadata)
+{
+ std::unordered_set<std::string> names;
+ for (const auto &context_metadata : contexts_metadata) {
+ for (const auto &layer_info : context_metadata.get_boundary_input_layers()) {
+ CHECK(names.find(layer_info.name) == names.end(), HAILO_INVALID_HEF,
+ "Layer name should be unique. name '{}' appears more than once", layer_info.name);
+ names.insert(layer_info.name);
+ }
+
+ for (const auto &layer_info : context_metadata.get_boundary_output_layers()) {
+ CHECK(names.find(layer_info.name) == names.end(), HAILO_INVALID_HEF,
+ "Layer name should be unique. name '{}' appears more than once", layer_info.name);
+ names.insert(layer_info.name);
+ }
+ }
+ return HAILO_SUCCESS;
+}
+
+Expected<std::vector<ContextMetadata>> HefUtils::parse_dynamic_contexts(const ProtoHEFCoreOpMock &core_op, const SupportedFeatures &supported_features)
+{
+ std::vector<ContextMetadata> contexts_metadata;
+ for (uint8_t context_index = 0; context_index < core_op.contexts.size(); context_index++) {
+ auto &context_proto = core_op.contexts[context_index];
+ auto context_metadata = parse_single_dynamic_context(core_op, context_proto, context_index, supported_features);
+ CHECK_EXPECTED(context_metadata);
+ contexts_metadata.emplace_back(context_metadata.release());
+ }
+
+ const auto status = validate_unique_boundary_names(contexts_metadata);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return contexts_metadata;
+}
+
// Converts the HEF proto NMS description into the runtime hailo_nms_info_t.
// Validates the defuse class-group index and the defused original layer name
// length; returns HAILO_INVALID_HEF / HAILO_INTERNAL_FAILURE accordingly.
Expected<hailo_nms_info_t> HefUtils::parse_proto_nms_info(const ProtoHEFNmsInfo &proto_nms_info)
{
    hailo_nms_info_t nms_info = {};
    nms_info.number_of_classes = static_cast<uint32_t>(proto_nms_info.number_of_classes());
    nms_info.bbox_size = static_cast<uint32_t>(proto_nms_info.bbox_size());
    nms_info.max_bboxes_per_class = static_cast<uint32_t>(proto_nms_info.max_output_size());
    nms_info.chunks_per_frame = static_cast<uint32_t>(proto_nms_info.input_division_factor());
    if (nms_info.chunks_per_frame == 0) {
        // Old hef, use default value 1
        nms_info.chunks_per_frame = 1;
    }
    nms_info.is_defused = static_cast<bool>(proto_nms_info.is_defused());
    nms_info.defuse_info.class_group_index =
        static_cast<uint32_t>(proto_nms_info.defuse_info().class_group_index());

    CHECK_AS_EXPECTED(nms_info.defuse_info.class_group_index < HailoRTCommon::MAX_DEFUSED_LAYER_COUNT,
        HAILO_INVALID_HEF, "class_group_index from HEF is bigger than {}!", HailoRTCommon::MAX_DEFUSED_LAYER_COUNT);

    const std::string &original_name = proto_nms_info.defuse_info().original_name();
    CHECK_AS_EXPECTED(HAILO_MAX_STREAM_NAME_SIZE >= (original_name.length() + 1), HAILO_INTERNAL_FAILURE,
        "original_name field '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE including the null terminated character)",
        original_name);
    // Safe: the check above guarantees length + 1 bytes (including the null
    // terminator) fit in the fixed-size destination array.
    strncpy(nms_info.defuse_info.original_name, original_name.c_str(), original_name.length() + 1);
    return nms_info;
}
+
+Expected<LayerInfo> HefUtils::get_boundary_layer_info(const ProtoHEFCoreOpMock &core_op,
+ const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
+{
+ // We parse only boundary layers for user usage
+ CHECK_AS_EXPECTED(
+ ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY == layer.context_switch_info().edge_connection_type(),
+ HAILO_INTERNAL_FAILURE, "get_layer_info can be called only on boundary layers");
+
+ LayerInfo result = {};
+ const auto direction =
+ (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) ?
+ HAILO_D2H_STREAM : HAILO_H2D_STREAM;
+ auto support_multi_networks = supported_features.multi_network_support;
+ auto network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
+ auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, network_index, supported_features);
+ CHECK_EXPECTED(partial_network_name);
+ const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
+ if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()) {
+ // TODO: return LayerInfo
+ auto status = fill_layer_info(layer.layer_info(), layer.context_switch_info().edge_connection_type(),
+ core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()) {
+ // TODO: return LayerInfo
+ auto status = fill_mux_info(layer.layer_mux(), layer.context_switch_info().edge_connection_type(),
+ core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ } else {
+ LOGGER__ERROR("Invalid layer type");
+ return make_unexpected(HAILO_INTERNAL_FAILURE);
+ }
+
+ result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
+ layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;
+
+ if (layer.has_pad_index()) {
+ result.pad_index = layer.pad_index();
+ }
+
+ return result;
+}
+
+static Expected<ConnectedContextInfo> parse_connected_context_info(
+ const ProtoHEFConnectedContextInfo &connected_context_proto)
+{
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.sys_index()), HAILO_INVALID_HEF,
+ "Failed to parse HEF. Invalid connected_sys_index: {}.", connected_context_proto.sys_index());
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.engine_id()), HAILO_INVALID_HEF,
+ "Failed to parse HEF. Invalid engine_id: {}. in connected_contexts", connected_context_proto.engine_id());
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(connected_context_proto.index()), HAILO_INVALID_HEF,
+ "Failed to parse HEF. Invalid connected_context_index: {}.", connected_context_proto.index());
+
+ ConnectedContextInfo connected_context{};
+ connected_context.context_index = static_cast<uint8_t>(connected_context_proto.index());
+ connected_context.stream_index = static_cast<uint8_t>(connected_context_proto.sys_index());
+ connected_context.dma_engine_index = static_cast<uint8_t>(connected_context_proto.engine_id());
+ return connected_context;
+}
+
// Builds a LayerInfo for an inter-context edge layer (data handed off between
// dynamic contexts of the same core op). Only "info" (non-mux) layers are
// valid here; all narrowed proto fields are range-checked first.
Expected<LayerInfo> HefUtils::get_inter_context_layer_info(const ProtoHEFCoreOpMock &core_op,
    const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
{
    LayerInfo result = {};
    CHECK_AS_EXPECTED(PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type(), HAILO_INVALID_HEF, "Inter-context layer can't be mux.");

    result.type = LayerType::INTER_CONTEXT;
    // Without multi-network support the whole core op acts as network 0.
    auto support_multi_networks = supported_features.multi_network_support;
    result.network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
    auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, result.network_index, supported_features);
    CHECK_EXPECTED(partial_network_name);
    result.network_name = HefUtils::get_network_name(core_op, partial_network_name.release());
    result.context_index = context_index;
    const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
    result.name = layer.layer_info().name();
    auto nn_stream_config_exp = HefConfigurator::parse_nn_stream_config(layer.layer_info().edge_layer_base(),
        hw_padding_supported, layer.context_switch_info().edge_connection_type());
    CHECK_EXPECTED(nn_stream_config_exp);
    result.nn_stream_config = nn_stream_config_exp.release();
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().sys_index()), HAILO_INVALID_HEF,
        "Failed to parse HEF. Invalid sys_index: {}.", layer.layer_info().edge_layer_base().sys_index());
    result.stream_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().sys_index());
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().engine_id()), HAILO_INVALID_HEF,
        "Failed to parse HEF. Invalid engine_id: {}.", layer.layer_info().edge_layer_base().engine_id());
    result.dma_engine_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().engine_id());

    result.max_shmifo_size = layer.layer_info().edge_layer_base().max_shmifo_size();

    result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
        layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;

    // HRT-7201 - The system supports one src and multiple destinations. Right now we're saving only one destination
    CHECK_AS_EXPECTED(layer.context_switch_info().connected_contexts_size() >= 1, HAILO_INVALID_HEF,
        "Inter context layer info must contain connected_context");
    auto connected_context = parse_connected_context_info(layer.context_switch_info().connected_contexts(0));
    CHECK_EXPECTED(connected_context);
    result.connected_context_info = connected_context.release();

    return result;
}
+
// Builds a LayerInfo for a DDR edge layer. Only "info" (non-mux) layers are
// valid here. A DDR layer must have exactly one connected context, and that
// connected context must be the layer's own context.
Expected<LayerInfo> HefUtils::get_ddr_layer_info(const ProtoHEFCoreOpMock &core_op,
    const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features)
{
    LayerInfo result = {};
    CHECK_AS_EXPECTED(PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type(), HAILO_INVALID_HEF, "DDR layer can't be mux.");

    result.type = LayerType::DDR;

    // Without multi-network support the whole core op acts as network 0.
    auto support_multi_networks = supported_features.multi_network_support;
    result.network_index = static_cast<uint8_t>((support_multi_networks) ? layer.network_index() : 0);
    auto partial_network_name = HefUtils::get_partial_network_name_by_index(core_op, result.network_index, supported_features);
    CHECK_EXPECTED(partial_network_name);
    result.network_name = HefUtils::get_network_name(core_op, partial_network_name.release());
    result.context_index = context_index;
    const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer);
    result.name = layer.layer_info().name();
    auto nn_stream_config_exp = HefConfigurator::parse_nn_stream_config(layer.layer_info().edge_layer_base(),
        hw_padding_supported, layer.context_switch_info().edge_connection_type());
    CHECK_EXPECTED(nn_stream_config_exp);
    result.nn_stream_config = nn_stream_config_exp.release();
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().sys_index()), HAILO_INVALID_HEF,
        "Failed to parse HEF. Invalid sys_index: {}.", layer.layer_info().edge_layer_base().sys_index());
    result.stream_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().sys_index());
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(layer.layer_info().edge_layer_base().engine_id()), HAILO_INVALID_HEF,
        "Failed to parse HEF. Invalid engine_id: {}.", layer.layer_info().edge_layer_base().engine_id());
    result.dma_engine_index = static_cast<uint8_t>(layer.layer_info().edge_layer_base().engine_id());
    result.max_shmifo_size = layer.layer_info().edge_layer_base().max_shmifo_size();

    CHECK_AS_EXPECTED(layer.context_switch_info().connected_contexts_size() == 1, HAILO_INVALID_HEF,
        "Only single connected context is supported on DDR channels");
    auto connected_context = parse_connected_context_info(layer.context_switch_info().connected_contexts(0));
    CHECK_EXPECTED(connected_context);
    CHECK_AS_EXPECTED(context_index == connected_context->context_index,
        HAILO_INVALID_HEF, "for ddr layer, connected_context_index must be same to the edge layer's context");
    result.connected_context_info = connected_context.release();

    result.direction = (ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST ==
        layer.direction()) ? HAILO_D2H_STREAM : HAILO_H2D_STREAM;

    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(layer.layer_info().edge_layer_base().core_buffers_per_frame()), HAILO_INVALID_HEF,
        "Failed to parse HEF. Invalid core_buffers_per_frame: {}.", layer.layer_info().edge_layer_base().core_buffers_per_frame());
    result.ddr_info.total_buffers_per_frame = static_cast<uint16_t>(layer.layer_info().edge_layer_base().core_buffers_per_frame());

    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(layer.context_switch_info().buffers()), HAILO_INVALID_HEF,
        "calculated number of transfers for DDR buffer is out of UINT16_T range");
    result.ddr_info.min_buffered_rows = static_cast<uint16_t>(layer.context_switch_info().buffers());

    return result;
}
+
+Expected<std::vector<std::string>> HefUtils::get_sorted_output_names(const ProtoHEFCoreOpMock &core_op)
+{
+ if (core_op.fused_layers_metadata.network_has_fused_layers()) {
+ return std::vector<std::string>(std::begin(core_op.fused_layers_metadata.updated_sorted_output_names()),
+ std::end(core_op.fused_layers_metadata.updated_sorted_output_names()));
+ } else if (0 != core_op.sorted_outputs_order.size()) {
+ // For backwards compatibility before we've added updated_sorted_output_names
+ return std::vector<std::string>(std::begin(core_op.sorted_outputs_order),
+ std::end(core_op.sorted_outputs_order));
+ } else {
+ // For backwards compatibility before we've added this field
+ uint32_t number_of_contexts = core_op.contexts.size();
+ const auto& context_metadata = core_op.contexts[number_of_contexts - 1].metadata();
+
+ CHECK_AS_EXPECTED(0 < context_metadata.sorted_outputs_order_size(), HAILO_INVALID_HEF,
+ "Sorted output names is not set up in the HEF.");
+
+ return std::vector<std::string>(std::begin(context_metadata.sorted_outputs_order()),
+ std::end(context_metadata.sorted_outputs_order()));
+ }
+}
+
+Expected<std::string> HefUtils::get_partial_network_name_by_index(const ProtoHEFCoreOpMock &core_op, uint8_t network_index,
+ const SupportedFeatures &supported_features)
+{
+ if (supported_features.multi_network_support) {
+ CHECK_AS_EXPECTED(network_index < core_op.networks_names.size(), HAILO_INVALID_ARGUMENT,
+ "Requested name for network_index={}, however there are only {} networks in the network group",
+ network_index, core_op.networks_names.size());
+ return std::string(core_op.networks_names[network_index]);
+ } else {
+ auto partial_network_name = core_op.network_group_metadata.network_group_name();
+ return partial_network_name;
+ }
+}
+
+std::string HefUtils::get_network_group_name(const ProtoHEFNetworkGroup &net_group, const SupportedFeatures &/*supported_features*/)
+{
+ if (!net_group.partial_network_groups().empty()) {
+ return net_group.partial_network_groups(0).network_group().network_group_metadata().network_group_name();
+ }
+ return net_group.network_group_metadata().network_group_name();
+}
+
+std::string HefUtils::get_network_name(const std::string &net_group_name, const std::string &partial_network_name)
+{
+ return net_group_name + HAILO_DEFAULT_NETWORK_NAME_QUALIFIER + partial_network_name;
+}
+
// Convenience overload: qualifies partial_network_name with the core op's
// network group name (see the two-string overload above for the format).
std::string HefUtils::get_network_name(const ProtoHEFCoreOpMock &core_op, const std::string &partial_network_name)
{
    return HefUtils::get_network_name(core_op.network_group_metadata.network_group_name(), partial_network_name);
}
+
+// Selects the concrete core-op configuration matching the device.
+// For a Hailo-8L HEF, the core-op is stored as partial configurations keyed by
+// a partial-clusters-layout bitmap; the matching one is returned. A full
+// Hailo-8 device can run any Hailo-8L configuration, so for it the first
+// partial configuration is accepted unconditionally.
+Expected<std::shared_ptr<ProtoHEFCoreOpMock>> Hef::Impl::get_core_op_per_arch(const ProtoHEFCoreOpMock &core_op,
+    ProtoHEFHwArch hef_arch, hailo_device_architecture_t device_arch, uint32_t partial_clusters_layout_bitmap)
+{
+    if (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == hef_arch) {
+        // Hailo8 can work with Hailo8L configurations. in that case we choose one of the configurations
+        for (auto &partial_core_op : core_op.partial_core_ops) {
+            if (partial_clusters_layout_bitmap == partial_core_op->layout.partial_clusters_layout_bitmap()
+                || (HAILO_ARCH_HAILO8 == device_arch)) {
+                return std::make_shared<ProtoHEFCoreOpMock>(*(partial_core_op->core_op));
+            }
+        }
+        LOGGER__ERROR("There is no matching partial_clusters_layout_bitmap configuration in the given HEF");
+        return make_unexpected(HAILO_INVALID_HEF);
+    } else {
+        // Non-8L architectures have a single configuration; return a copy.
+        return std::make_shared<ProtoHEFCoreOpMock>(core_op);
+    }
+}
+
+// Returns the output names of the given network group in their HEF-defined
+// order. Net-flow HEFs store the order on the network group itself; otherwise
+// it is taken from the core-op metadata.
+Expected<std::vector<std::string>> Hef::Impl::get_sorted_output_names(const std::string &net_group_name)
+{
+    if (m_supported_features.hailo_net_flow) {
+        std::vector<std::string> res;
+        for (const auto &net_group : m_groups) {
+            auto curr_name = HefUtils::get_network_group_name(*net_group, m_supported_features);
+            if (curr_name == net_group_name) {
+                res.reserve(net_group->sorted_outputs_order().size());
+                for (auto &name : net_group->sorted_outputs_order()) {
+                    res.push_back(name);
+                }
+                return res;
+            }
+        }
+        LOGGER__ERROR("Did not find network group of name {}", net_group_name);
+        return make_unexpected(HAILO_INVALID_HEF);
+    }
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    auto res = core_op_metadata->get_sorted_output_names();
+    return res;
+}
+
+// Parses a single CCW (config channel word) record: a CcwHeader (target
+// address + word count, stored as count-1) followed by the payload data.
+// Returns the target address and a copy of the payload.
+static Expected<WriteMemoryInfo> parse_ccw_buffer(const std::string &ccw_buffer)
+{
+    WriteMemoryInfo write_memory_info = {};
+    CHECK_AS_EXPECTED(ccw_buffer.size() > CCW_DATA_OFFSET, HAILO_INVALID_HEF, "ccw buffer is too small");
+    CcwHeader *header = (CcwHeader*)(ccw_buffer.data());
+
+    // words_count is stored off-by-one in the header.
+    uint32_t words_count = header->words_count + 1;
+    auto data_length = words_count * CCW_BYTES_IN_WORD;
+    write_memory_info.address = header->address;
+
+    // Validation for ccw size
+    // NOTE(review): an odd word count implies one extra word in the buffer
+    // beyond the counted payload — presumably alignment padding; confirm
+    // against the CCW format spec.
+    size_t expected_ccw_data_length = (ccw_buffer.length() - CCW_DATA_OFFSET);
+    if (0 != (words_count % 2)) {
+        expected_ccw_data_length -= CCW_BYTES_IN_WORD;
+    }
+    CHECK_AS_EXPECTED(data_length == expected_ccw_data_length, HAILO_INVALID_HEF,
+        "Invalid ccw buffer was parsed from HEF");
+
+    auto data_buff = Buffer::create(reinterpret_cast<const uint8_t*>(ccw_buffer.data() + CCW_DATA_OFFSET), data_length);
+    CHECK_EXPECTED(data_buff);
+    write_memory_info.data = data_buff.release();
+
+    return write_memory_info;
+}
+
+/* HcpConfigCoreOp funcs */
+
+// Flattens a single-context (preliminary) configuration into a list of plain
+// memory writes, for configuration paths that cannot execute triggers/actions
+// (e.g. over Ethernet). Only trigger-less operations are supported; write
+// actions are converted, wait actions are ignored, sequencer actions are
+// rejected.
+Expected<std::vector<WriteMemoryInfo>> Hef::Impl::create_single_context_core_op_config(const ProtoHEFPreliminaryConfig& proto_config)
+{
+    std::vector<WriteMemoryInfo> config_buffers;
+
+    for (const auto &operation : proto_config.operation()) {
+        switch (operation.trigger().trigger_case()) {
+            case ProtoHEFTrigger::kTriggerNone: {
+                break;
+            }
+            default: {
+                LOGGER__ERROR("Triggers different from 'ProtoHEFTriggerNone' are not supported");
+                return make_unexpected(HAILO_INTERNAL_FAILURE);
+            }
+        }
+
+        for (const auto &action : operation.actions()) {
+            switch (action.action_case()) {
+                case ProtoHEFAction::kNone: {
+                    break;
+                }
+                case ProtoHEFAction::kWriteData: {
+                    // Direct memory write: copy the payload as-is.
+                    WriteMemoryInfo write_memory_info = {};
+                    write_memory_info.address = static_cast<uint32_t>(action.write_data().address());
+                    auto data_buff = Buffer::create(
+                        reinterpret_cast<const uint8_t*>(action.write_data().data().data()),
+                        action.write_data().data().length());
+                    CHECK_EXPECTED(data_buff);
+                    write_memory_info.data = data_buff.release();
+                    config_buffers.emplace_back(std::move(write_memory_info));
+                    break;
+                }
+                case ProtoHEFAction::kWriteDataCcw: {
+                    // CCW payloads carry their own header with address/length.
+                    auto config_buffer = parse_ccw_buffer(action.write_data_ccw().data());
+                    CHECK_EXPECTED(config_buffer);
+                    config_buffers.emplace_back(config_buffer.release());
+                    break;
+                }
+                case ProtoHEFAction::kDisableLcu: {
+                    // We ignore this action. the lcu_disable will happen in the nn_core reset before configuring specific network_group
+                    break;
+                }
+                case ProtoHEFAction::kEnableLcu: {
+                    // LCU enable is expressed as a write of the enable control
+                    // word to the LCU's enable register address.
+                    WriteMemoryInfo write_memory_info = {};
+                    write_memory_info.address = action.enable_lcu().lcu_enable_address();
+                    auto data_buff = Buffer::create(ENABLE_LCU_CONTROL_WORD, sizeof(ENABLE_LCU_CONTROL_WORD));
+                    CHECK_EXPECTED(data_buff);
+                    write_memory_info.data = data_buff.release();
+                    config_buffers.emplace_back(std::move(write_memory_info));
+                    break;
+                }
+                // Note: the next two case labels share one body (the inner
+                // label sits inside the first label's braces).
+                case ProtoHEFAction::kAllowInputDataflow: {
+                case ProtoHEFAction::kWaitForModuleConfigDone:
+                    // We ignore the 'wait_for_interrupt' actions. After writing the configurations we can be sure everything is configured and dont need to wait for interrupts
+                    break;
+                }
+                case ProtoHEFAction::kWaitForSeqeuncer: {
+                case ProtoHEFAction::kEnableSequencer:
+                    LOGGER__ERROR("Parsing error. Sequencer related actions are not supported over Ethernet. "
+                        "If you use the Ethernet interface, please disable the Sequencer in the Dataflow Compiler (SDK) and then re-create the HEF. "
+                        "Disabling the Sequencer is done using the hef_param command in the model script (ALLS file). "
+                        "See the Dataflow Compiler user guide for more information.");
+                    return make_unexpected(HAILO_INVALID_HEF);
+                }
+                default: {
+                    LOGGER__ERROR("Invalid action");
+                    return make_unexpected(HAILO_INTERNAL_FAILURE);
+                }
+            }
+        }
+    }
+
+    return config_buffers;
+}
+
+// Returns the hardware architecture recorded in the HEF header — i.e. the
+// arch this HEF was compiled for, not a probed device architecture.
+ProtoHEFHwArch Hef::Impl::get_device_arch()
+{
+    return m_header.hw_arch();
+}
+
+// Returns the compiler-estimated bottleneck FPS stored in the network group's
+// metadata.
+Expected<float64_t> Hef::Impl::get_bottleneck_fps(const std::string &net_group_name)
+{
+    auto core_op = get_core_op_by_net_group_name(net_group_name);
+    CHECK_EXPECTED(core_op);
+    return core_op.value()->network_group_metadata.bottleneck_fps();
+}
+
+// Returns true if any edge layer in any context of the core-op uses a DDR
+// edge connection.
+bool Hef::Impl::contains_ddr_layers(const ProtoHEFCoreOpMock& core_op)
+{
+    for (auto &context : core_op.contexts) {
+        for (auto &layer : context.metadata().edge_layers()) {
+            if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR ==
+                layer.context_switch_info().edge_connection_type()) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+// Resolves the low-level stream names backing the given vstream, delegating
+// to the core-op metadata of the named network group.
+Expected<std::vector<std::string>> Hef::Impl::get_stream_names_from_vstream_name(const std::string &vstream_name,
+    const std::string &net_group_name)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    return core_op_metadata->get_stream_names_from_vstream_name(vstream_name);
+}
+
+// Resolves the vstream names fed by (or feeding) the given low-level stream,
+// delegating to the core-op metadata of the named network group.
+Expected<std::vector<std::string>> Hef::Impl::get_vstream_names_from_stream_name(const std::string &stream_name,
+    const std::string &net_group_name)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    return core_op_metadata->get_vstream_names_from_stream_name(stream_name);
+}
+
+// Recursive helper: searches a (possibly nested) mux edge for the layer whose
+// original_names contains 'original_name', returning that layer's vstream
+// name. Info edges are leaves; mux edges recurse into their predecessors.
+Expected<std::string> Hef::Impl::get_vstream_name_from_original_name_mux(const std::string &original_name, const ProtoHefEdge &layer)
+{
+    switch (layer.edge_case()) {
+    case ProtoHefEdge::kLayerInfo:
+        for (const auto &name : layer.layer_info().original_names()) {
+            if (original_name == name) {
+                return std::string(layer.layer_info().name());
+            }
+        }
+        return make_unexpected(HAILO_NOT_FOUND);
+    case ProtoHefEdge::kLayerMux:
+        for (const auto &pred : layer.layer_mux().predecessors()) {
+            auto res = get_vstream_name_from_original_name_mux(original_name, pred);
+            if (res) {
+                return std::move(res.value());
+            }
+        }
+        return make_unexpected(HAILO_NOT_FOUND);
+    default:
+        LOGGER__ERROR("Invalid layer type");
+        return make_unexpected(HAILO_INTERNAL_FAILURE);
+    }
+}
+
+// Maps an original (pre-compilation) layer name to its vstream name.
+// Scans all boundary edge layers in every context; mux layers are searched
+// recursively. Fails with HAILO_INVALID_HEF if the original name maps to more
+// than one vstream, and HAILO_NOT_FOUND if it maps to none.
+Expected<std::string> Hef::Impl::get_vstream_name_from_original_name(const std::string &original_name,
+    const std::string &net_group_name)
+{
+    auto core_op = get_core_op_by_net_group_name(net_group_name);
+    CHECK_EXPECTED(core_op);
+
+    std::string results;
+
+    for (const auto &context : core_op.value()->contexts) {
+        for (const auto &layer_info : context.metadata().edge_layers()) {
+            if ((is_h2d_boundary_info_layer(layer_info)) || (is_d2h_boundary_info_layer(layer_info))) {
+                for (auto &name : layer_info.layer_info().original_names()) {
+                    if (original_name == name) {
+                        CHECK_AS_EXPECTED(results.empty(), HAILO_INVALID_HEF, "Original name {} appears more than once in the HEF.", original_name);
+                        results = std::string(layer_info.layer_info().name());
+                    }
+                }
+            } else if(is_d2h_boundary_mux_layer(layer_info)) {
+                for (auto &pred : layer_info.layer_mux().predecessors()) {
+                    auto stream_name = get_vstream_name_from_original_name_mux(original_name, pred);
+                    if (stream_name) {
+                        CHECK_AS_EXPECTED(results.empty(), HAILO_INVALID_HEF, "Original name {} appears more than once in the HEF.", original_name);
+                        results = stream_name.value();
+                    }
+                }
+            }
+        }
+    }
+    CHECK_AS_EXPECTED(!results.empty(), HAILO_NOT_FOUND);
+    return results;
+}
+
+// Recursive helper: searches a (possibly nested) mux edge for the layer named
+// 'vstream_name' and returns all of its original (pre-compilation) names.
+// Info edges are leaves; mux edges recurse into their predecessors.
+Expected<std::vector<std::string>> Hef::Impl::get_original_names_from_vstream_name_mux(const std::string &vstream_name, const ProtoHefEdge &layer)
+{
+    switch (layer.edge_case()) {
+    case ProtoHefEdge::kLayerInfo:
+    {
+        if (vstream_name == layer.layer_info().name()) {
+            std::vector<std::string> results;
+            for (const auto &name : layer.layer_info().original_names()) {
+                results.push_back(name);
+            }
+            return results;
+        }
+        return make_unexpected(HAILO_NOT_FOUND);
+    }
+    case ProtoHefEdge::kLayerMux:
+        for (const auto &pred : layer.layer_mux().predecessors()) {
+            auto res = get_original_names_from_vstream_name_mux(vstream_name, pred);
+            if (res) {
+                return std::move(res.value());
+            }
+        }
+        return make_unexpected(HAILO_NOT_FOUND);
+    default:
+        LOGGER__ERROR("Invalid layer type");
+        return make_unexpected(HAILO_INTERNAL_FAILURE);
+    }
+}
+
+// Maps a vstream name back to its original (pre-compilation) layer names.
+// Scans all boundary edge layers in every context of the network group; mux
+// layers are searched recursively through their predecessors. Returns
+// HAILO_NOT_FOUND if no layer matches.
+// (Fixed: local variable was misspelled 'copre_op'.)
+Expected<std::vector<std::string>> Hef::Impl::get_original_names_from_vstream_name(const std::string &vstream_name,
+    const std::string &net_group_name)
+{
+    auto core_op = get_core_op_by_net_group_name(net_group_name);
+    CHECK_EXPECTED(core_op);
+
+    std::vector<std::string> results;
+
+    for (const auto &context : core_op.value()->contexts) {
+        for (const auto &layer_info : context.metadata().edge_layers()) {
+            if ((is_h2d_boundary_info_layer(layer_info)) || (is_d2h_boundary_info_layer(layer_info))) {
+                if (vstream_name == layer_info.layer_info().name()) {
+                    for (const auto &name : layer_info.layer_info().original_names()) {
+                        results.push_back(name);
+                    }
+                    return results;
+                }
+            } else if (is_d2h_boundary_mux_layer(layer_info)) {
+                for (const auto &pred : layer_info.layer_mux().predecessors()) {
+                    auto names = get_original_names_from_vstream_name_mux(vstream_name, pred);
+                    if (names) {
+                        return std::move(names.value());
+                    }
+                }
+            }
+        }
+    }
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Validates that every boundary edge-layer name in the core-op is unique
+// across all contexts. Returns HAILO_INVALID_HEF on a duplicate or on an
+// unknown edge-layer type.
+hailo_status Hef::Impl::validate_core_op_unique_layer_names(const ProtoHEFCoreOpMock &core_op)
+{
+    std::set<std::string> edge_layer_names;
+    std::string layer_name;
+    for (auto &context : core_op.contexts) {
+        for (auto &layer : context.metadata().edge_layers()) {
+            // TODO: remove check for boundary layer after fix will be pushed in SDK
+            if (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+                layer.context_switch_info().edge_connection_type()) {
+                if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()) {
+                    layer_name = layer.layer_info().name();
+                } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()) {
+                    layer_name = layer.layer_mux().name();
+                } else {
+                    LOGGER__ERROR("Invalid layer type.");
+                    return HAILO_INVALID_HEF;
+                }
+                CHECK(!contains(edge_layer_names, layer_name), HAILO_INVALID_HEF,
+                    "layer_name should be unique. {} appears more than once in the given network_group.",
+                    layer_name);
+                edge_layer_names.insert(layer_name);
+            }
+        }
+    }
+    return HAILO_SUCCESS;
+}
+
+// Public API: forwards to the pimpl implementation.
+std::vector<std::string> Hef::get_network_groups_names()
+{
+    return pimpl->get_network_groups_names();
+}
+
+// Builds default configure params for every network group in the HEF,
+// keyed by group name, using the given stream interface for all streams.
+Expected<NetworkGroupsParamsMap> Hef::create_configure_params(hailo_stream_interface_t stream_interface)
+{
+    NetworkGroupsParamsMap results;
+    for (const auto &name : pimpl->get_network_groups_names()) {
+        auto params = create_configure_params(stream_interface, name);
+        CHECK_EXPECTED(params);
+        results.emplace(std::make_pair(name, params.release()));
+    }
+    return results;
+}
+
+// Public API: forwards to the pimpl implementation for a single group.
+Expected<ConfigureNetworkParams> Hef::create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_group_name)
+{
+    return pimpl->create_configure_params(stream_interface, network_group_name);
+}
+
+// Builds default configure params for every network group, with inputs
+// configured as MIPI streams and outputs using 'output_interface'.
+Expected<NetworkGroupsParamsMap> Hef::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
+    const hailo_mipi_input_stream_params_t &mipi_params)
+{
+    NetworkGroupsParamsMap results;
+    for (const auto &name : pimpl->get_network_groups_names()) {
+        auto params = create_configure_params_mipi_input(output_interface, mipi_params, name);
+        CHECK_EXPECTED(params);
+        results.emplace(std::make_pair(name, params.release()));
+    }
+    return results;
+}
+
+
+// Public API: forwards to the pimpl implementation for a single group.
+Expected<ConfigureNetworkParams> Hef::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
+    const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_group_name)
+{
+    return pimpl->create_configure_params_mipi_input(output_interface, mipi_params, network_group_name);
+}
+
+// Returns the HEF's MD5 digest as an uppercase hex string
+// (LOWERCASE == false selects uppercase output).
+std::string Hef::hash() const
+{
+    const auto &md5 = pimpl->md5();
+    const bool LOWERCASE = false;
+    return StringUtils::to_hex_string(md5, MD5_DIGEST_LENGTH, LOWERCASE);
+}
+
+// Collects the names of all network groups in the HEF. For Hailo-8L HEFs the
+// name is taken from the first partial network group (all partials share the
+// logical name).
+std::vector<std::string> Hef::Impl::get_network_groups_names()
+{
+    std::vector<std::string> results;
+    results.reserve(m_groups.size());
+
+    for (const auto &net_group : m_groups) {
+        auto &network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
+            net_group->partial_network_groups(0).network_group().network_group_metadata().network_group_name()
+            : net_group->network_group_metadata().network_group_name();
+        results.push_back(network_group_name);
+    }
+    return results;
+}
+
+// Public API: forwards to the pimpl implementation.
+Expected<std::vector<hailo_network_group_info_t>> Hef::get_network_groups_infos()
+{
+    return pimpl->get_network_groups_infos();
+}
+
+// Formats one description line per input/output stream of the given network:
+// "Input|Output <name> <shape>\n".
+Expected<std::vector<std::string>> Hef::Impl::get_stream_infos_description(const std::string &network_group_name, const std::string &network_name)
+{
+    std::vector<std::string> infos_strings;
+    auto input_stream_infos = get_input_stream_infos(network_group_name, network_name);
+    CHECK_EXPECTED(input_stream_infos, "Failed to parse input stream infos");
+    auto output_stream_infos = get_output_stream_infos(network_group_name, network_name);
+    CHECK_EXPECTED(output_stream_infos, "Failed to parse output stream infos");
+    infos_strings.reserve(input_stream_infos.value().size() + output_stream_infos.value().size());
+    std::string infos_string;
+
+    for (const auto &stream_info : input_stream_infos.value()) {
+        auto shape_str = get_shape_str(stream_info);
+        infos_string = "Input " + std::string(stream_info.name) + " " + shape_str + "\n";
+        infos_strings.emplace_back(infos_string);
+    }
+
+    for (const auto &stream_info : output_stream_infos.value()) {
+        auto shape_str = get_shape_str(stream_info);
+        infos_string = "Output " + std::string(stream_info.name) + " " + shape_str + "\n";
+        infos_strings.emplace_back(infos_string);
+    }
+
+    return infos_strings;
+}
+
+// Formats one description line per input/output vstream of the given network:
+// "Input|Output <name> <shape>\n".
+// (Fixed: the output-vstream error message said "stream" — a copy-paste from
+// get_stream_infos_description.)
+Expected<std::vector<std::string>> Hef::Impl::get_vstream_infos_description(const std::string &network_group_name, const std::string &network_name)
+{
+    std::vector<std::string> infos_strings;
+    auto input_vstream_infos = get_input_vstream_infos(network_group_name, network_name);
+    CHECK_EXPECTED(input_vstream_infos, "Failed to parse input vstream infos");
+    auto output_vstream_infos = get_output_vstream_infos(network_group_name, network_name);
+    CHECK_EXPECTED(output_vstream_infos, "Failed to parse output vstream infos");
+    infos_strings.reserve(input_vstream_infos.value().size() + output_vstream_infos.value().size());
+    std::string infos_string;
+
+    for (const auto &vstream_info : input_vstream_infos.value()) {
+        auto shape_str = get_shape_str(vstream_info);
+        infos_string = "Input " + std::string(vstream_info.name) + " " + shape_str + "\n";
+        infos_strings.emplace_back(infos_string);
+    }
+
+    for (const auto &vstream_info : output_vstream_infos.value()) {
+        auto shape_str = get_shape_str(vstream_info);
+        infos_string = "Output " + std::string(vstream_info.name) + " " + shape_str + "\n";
+        infos_strings.emplace_back(infos_string);
+    }
+
+    return infos_strings;
+}
+
+// Builds a list of description lines for the network group's post-process
+// (NMS) operations by splitting a ", "-separated summary string.
+// NOTE(review): the loop assigns (not appends to) infos_string each
+// iteration, so when several post-process ops exist only the last one is
+// described — confirm whether accumulation was intended.
+Expected<std::vector<std::string>> Hef::Impl::get_post_processes_infos_description(const std::string &network_group_name)
+{
+    std::vector<std::string> infos_strings;
+    std::string infos_string;
+
+    auto post_process = post_process_ops(network_group_name);
+    for (const auto &post_process_info : post_process) {
+        infos_string = post_process_info->op->get_op_description();
+        infos_string += ", Bbox size: " + std::to_string(post_process_info->nms_info.bbox_size) +
+            ", Max bboxes per class: " + std::to_string(post_process_info->nms_info.max_bboxes_per_class);
+    }
+    /* If the string is empty there is no need to continue. */
+    if (infos_string.empty()) {
+        return infos_strings;
+    }
+
+    /* Splitting the info string and assembling the vector from each token. */
+    std::string token;
+    size_t pos;
+    while ((pos = infos_string.find(",")) != std::string::npos) {
+        token.assign(infos_string.begin(), infos_string.begin() + pos);
+        token += "\n";
+        infos_strings.push_back(token);
+        /* Assuming each token is separated with ", " */
+        infos_string.erase(0, pos + SKIP_SPACE_COMMA_CHARACTERS);
+    }
+    infos_strings.push_back(infos_string + "\n");
+
+    return infos_strings;
+}
+
+// Public API: resolves the HEF's target architecture and forwards to the
+// pimpl implementation.
+Expected<std::string> Hef::get_hef_description(bool stream_infos, bool vstream_infos)
+{
+    auto arch = get_hef_device_arch();
+    CHECK_EXPECTED(arch);
+    return pimpl->get_hef_description(stream_infos, vstream_infos, arch.value());
+}
+
+// Builds a human-readable multi-line description of the HEF: the target
+// architecture, then for each network group / network the requested stream
+// and vstream details (plus post-process info when vstream_infos is set).
+// (Fixed: the inner description loops iterated by value, copying each
+// std::string; they now iterate by const reference.)
+Expected<std::string> Hef::Impl::get_hef_description(bool stream_infos, bool vstream_infos, hailo_device_architecture_t device_arch)
+{
+    std::string hef_infos;
+    auto hef_arch_str = HailoRTCommon::get_device_arch_str(device_arch);
+    hef_infos += "Architecture HEF was compiled for: " + hef_arch_str + "\n";
+
+    auto network_group_infos = get_network_groups_infos();
+    CHECK_EXPECTED(network_group_infos);
+    for (const auto &network_group_info : network_group_infos.release()) {
+        auto core_op_meta_data = get_core_op_metadata(network_group_info.name);
+        CHECK_EXPECTED(core_op_meta_data);
+        auto number_of_contexts = core_op_meta_data->get_contexts_count();
+        auto contexts_str = (network_group_info.is_multi_context ? "Multi Context - Number of contexts: " + std::to_string(number_of_contexts) : "Single Context");
+        hef_infos += "Network group name: " + std::string(network_group_info.name) + ", " + contexts_str + "\n";
+
+        auto network_infos = get_network_infos(network_group_info.name);
+        CHECK_EXPECTED(network_infos, "Failed to parse networks infos");
+
+        for (const auto &network_info : network_infos.value()) {
+            hef_infos += add_tabs(1) + "Network name: " + network_info.name + "\n";
+            if (stream_infos) {
+                auto stream_infos_strings = get_stream_infos_description(network_group_info.name, network_info.name);
+                CHECK_EXPECTED(stream_infos_strings);
+                hef_infos += add_tabs(2) + "Stream infos:" + "\n";
+                for (const auto &stream_info_string : stream_infos_strings.value()) {
+                    hef_infos += add_tabs(3) + stream_info_string;
+                }
+            }
+            if (vstream_infos) {
+                auto vstream_infos_strings = get_vstream_infos_description(network_group_info.name, network_info.name);
+                CHECK_EXPECTED(vstream_infos_strings);
+                hef_infos += add_tabs(2) + "VStream infos:" + "\n";
+                for (const auto &vstream_info_string : vstream_infos_strings.value()) {
+                    hef_infos += add_tabs(3) + vstream_info_string;
+                }
+
+                auto post_processes_infos_strings = get_post_processes_infos_description(network_group_info.name);
+                CHECK_EXPECTED(post_processes_infos_strings);
+                /* Validating that there is a postprocess info. */
+                if (post_processes_infos_strings->size() <= 0) {
+                    continue;
+                }
+                hef_infos += add_tabs(3) + "Operation:" + "\n";
+                for (const auto &post_process_info_string : post_processes_infos_strings.value()) {
+                    hef_infos += add_tabs(4) + post_process_info_string;
+                }
+            }
+        }
+    }
+
+    return hef_infos;
+}
+
+// Builds a hailo_network_group_info_t per network group: its name (taken from
+// the first partial core-op on Hailo-8L HEFs) and whether it is
+// multi-context. Fails if a name would overflow the fixed-size info field.
+Expected<std::vector<hailo_network_group_info_t>> Hef::Impl::get_network_groups_infos()
+{
+    std::vector<hailo_network_group_info_t> results;
+    results.reserve(m_core_ops_per_group.size());
+
+    for (const auto &group_name_to_core_op : m_core_ops_per_group) {
+        const auto &core_op = group_name_to_core_op.second[0];
+        hailo_network_group_info_t info = {};
+        auto &network_group_name = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
+            core_op.partial_core_ops[0]->core_op->network_group_metadata.network_group_name()
+            : core_op.network_group_metadata.network_group_name();
+        // +1 accounts for the NUL terminator copied by strncpy below.
+        CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_GROUP_NAME_SIZE >= (network_group_name.length() + 1), HAILO_INTERNAL_FAILURE,
+            "The network group '{}' has a too long name (max is HAILO_MAX_NETWORK_GROUP_NAME_SIZE)", network_group_name);
+        strncpy(info.name, network_group_name.c_str(), network_group_name.length() + 1);
+        info.is_multi_context = (1 < core_op.contexts.size());
+        results.push_back(info);
+    }
+    return results;
+}
+
+// Public API: 'name' may be a network group or a network name; it is resolved
+// to the (group, network) pair before delegating to the impl.
+Expected<std::map<std::string, hailo_vstream_params_t>> Hef::make_input_vstream_params(
+    const std::string &name, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
+    uint32_t queue_size)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->make_input_vstream_params(network_pair.value().first, network_pair.value().second, quantized, format_type,
+        timeout_ms, queue_size);
+}
+
+// Builds default input vstream params for every input vstream of the network
+// (starts from an empty map, so all entries get the defaults).
+Expected<std::map<std::string, hailo_vstream_params_t>> Hef::Impl::make_input_vstream_params(
+    const std::string &net_group_name, const std::string &network_name, bool quantized,
+    hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    std::map<std::string, hailo_vstream_params_t> input_vstreams_params;
+    auto status = fill_missing_input_vstream_params_with_default(net_group_name,
+        network_name, input_vstreams_params, quantized, format_type, timeout_ms, queue_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return input_vstreams_params;
+}
+
+// Public API: 'name' may be a network group or a network name; it is resolved
+// to the (group, network) pair before delegating to the impl.
+Expected<std::map<std::string, hailo_vstream_params_t>> Hef::make_output_vstream_params(
+    const std::string &name, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
+    uint32_t queue_size)
+{
+    auto network_pair = pimpl->get_network_group_and_network_name(name);
+    CHECK_EXPECTED(network_pair);
+
+    return pimpl->make_output_vstream_params(network_pair.value().first, network_pair.value().second, quantized, format_type,
+        timeout_ms, queue_size);
+}
+
+// Builds default output vstream params for every output vstream of the
+// network (starts from an empty map, so all entries get the defaults).
+Expected<std::map<std::string, hailo_vstream_params_t>> Hef::Impl::make_output_vstream_params(
+    const std::string &net_group_name, const std::string &network_name, bool quantized,
+    hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    std::map<std::string, hailo_vstream_params_t> output_vstreams_params;
+    auto status = fill_missing_output_vstream_params_with_default(net_group_name,
+        network_name, output_vstreams_params, quantized, format_type, timeout_ms, queue_size);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return output_vstreams_params;
+}
+
+// Adds default params for every input vstream of the network that is not
+// already present in 'input_vstreams_params'. Existing entries are untouched.
+hailo_status Hef::Impl::fill_missing_input_vstream_params_with_default(const std::string &net_group_name,
+    const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED_AS_STATUS(core_op_metadata);
+    auto input_vstream_infos = core_op_metadata->get_input_vstream_infos(network_name);
+    CHECK_EXPECTED_AS_STATUS(input_vstream_infos);
+
+    return fill_missing_vstream_params_with_default(input_vstreams_params, input_vstream_infos.value(),
+        quantized, format_type, timeout_ms, queue_size);
+}
+
+// Adds default params for every output vstream of the network that is not
+// already present in 'output_vstream_params'. Existing entries are untouched.
+hailo_status Hef::Impl::fill_missing_output_vstream_params_with_default(const std::string &net_group_name,
+    const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &output_vstream_params,
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED_AS_STATUS(core_op_metadata);
+    auto output_vstream_infos = core_op_metadata->get_output_vstream_infos(network_name);
+    CHECK_EXPECTED_AS_STATUS(output_vstream_infos);
+
+    return fill_missing_vstream_params_with_default(output_vstream_infos.value() == output_vstream_infos.value() ? output_vstream_params : output_vstream_params, output_vstream_infos.value(),
+        quantized, format_type, timeout_ms, queue_size);
+}
+
+// Shared helper: for each vstream info whose name is absent from
+// 'vstream_params', inserts a default entry (AUTO order, caller's format
+// type, timeout and queue size; QUANTIZED flag when 'quantized' is set).
+// Never overwrites entries the caller already supplied.
+hailo_status Hef::Impl::fill_missing_vstream_params_with_default(std::map<std::string, hailo_vstream_params_t> &vstream_params,
+    std::vector<hailo_vstream_info_t> &vstream_infos, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
+    uint32_t queue_size)
+{
+    hailo_format_flags_t flags = static_cast<hailo_format_flags_t>(HAILO_FORMAT_FLAGS_NONE);
+    if (quantized) {
+        flags = static_cast<hailo_format_flags_t>(flags | HAILO_FORMAT_FLAGS_QUANTIZED);
+    }
+    for (const auto &vstream_info : vstream_infos) {
+        std::string vstream_name(vstream_info.name);
+        if (contains(vstream_params, vstream_name)) {
+            continue;
+        }
+        hailo_vstream_params_t params{};
+        params.user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
+        params.user_buffer_format.type = format_type;
+        params.user_buffer_format.flags = flags;
+        params.timeout_ms = timeout_ms;
+        params.queue_size = queue_size;
+        vstream_params.insert(std::make_pair(vstream_name, params));
+    }
+    return HAILO_SUCCESS;
+}
+
+// Assembles default configure params for one network group: default global
+// params plus per-stream and per-network defaults.
+Expected<ConfigureNetworkParams> Hef::Impl::create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_group_name)
+{
+    auto params = HailoRTDefaults::get_configure_params();
+    auto stream_params_by_name = create_stream_parameters_by_name(network_group_name, stream_interface);
+    CHECK_EXPECTED(stream_params_by_name);
+    params.stream_params_by_name = stream_params_by_name.release();
+    auto network_params_by_name = create_network_parameters_by_name(network_group_name);
+    CHECK_EXPECTED(network_params_by_name);
+    params.network_params_by_name = network_params_by_name.release();
+
+    return params;
+}
+
+// Like create_configure_params, but input streams are configured for MIPI
+// while outputs use 'output_interface'.
+Expected<ConfigureNetworkParams> Hef::Impl::create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
+    const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_group_name)
+{
+    auto params = HailoRTDefaults::get_configure_params();
+    auto stream_params_by_name = create_stream_parameters_by_name_mipi_input(network_group_name, output_interface, mipi_params);
+    CHECK_EXPECTED(stream_params_by_name);
+    params.stream_params_by_name = stream_params_by_name.release();
+    auto network_params_by_name = create_network_parameters_by_name(network_group_name);
+    CHECK_EXPECTED(network_params_by_name);
+    params.network_params_by_name = network_params_by_name.release();
+
+    return params;
+}
+
+// Public API: normalizes 'net_group_name' (which may be a network name) to
+// the owning group's name, then delegates to the impl.
+Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::create_stream_parameters_by_name(
+    const std::string &net_group_name, hailo_stream_interface_t stream_interface)
+{
+    auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(network_group_name_pair);
+    auto net_group_name_str = network_group_name_pair->first;
+
+    return pimpl->create_stream_parameters_by_name(net_group_name_str, stream_interface);
+}
+
+// Builds default stream params for every boundary stream of the group:
+// inputs as H2D, outputs as D2H, all on the given interface.
+Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::Impl::create_stream_parameters_by_name(
+    const std::string &net_group_name, hailo_stream_interface_t stream_interface)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    std::map<std::string, hailo_stream_parameters_t> results;
+    for (auto &input_layer : core_op_metadata->get_input_layer_infos()) {
+        auto params = HailoRTDefaults::get_stream_parameters(stream_interface, HAILO_H2D_STREAM);
+        CHECK_EXPECTED(params);
+        results.emplace(std::make_pair(input_layer.name, params.release()));
+    }
+    for (auto &output_layer : core_op_metadata->get_output_layer_infos()) {
+        auto params = HailoRTDefaults::get_stream_parameters(stream_interface, HAILO_D2H_STREAM);
+        CHECK_EXPECTED(params);
+        results.emplace(std::make_pair(output_layer.name, params.release()));
+    }
+
+    return results;
+}
+
+// Public API: forwards to the pimpl implementation.
+Expected<std::map<std::string, hailo_network_parameters_t>> Hef::create_network_parameters_by_name(
+    const std::string &net_group_name)
+{
+    return pimpl->create_network_parameters_by_name(net_group_name);
+}
+
+// Builds default network params keyed by fully-qualified network name.
+// Multi-network HEFs get one entry per listed network; legacy HEFs get a
+// single entry with a default-derived network name.
+Expected<std::map<std::string, hailo_network_parameters_t>> Hef::Impl::create_network_parameters_by_name(
+    const std::string &net_group_name)
+{
+    auto core_op = get_core_op_by_net_group_name(net_group_name);
+    CHECK_EXPECTED(core_op);
+
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    std::map<std::string, hailo_network_parameters_t> results;
+
+    if (core_op_metadata->supported_features().multi_network_support) {
+        CHECK_AS_EXPECTED((core_op.value()->networks_names.size() != 0), HAILO_INTERNAL_FAILURE,
+            "Hef support multiple networks, but no networks found in the proto");
+        for (const auto &partial_network_name : core_op.value()->networks_names) {
+            auto network_name = HefUtils::get_network_name(net_group_name, partial_network_name);
+            auto params = HailoRTDefaults::get_network_parameters();
+            results.emplace(std::make_pair(network_name, params));
+        }
+    } else {
+        /* For hefs without the "networks_names" field, build default network name with default params */
+        auto params = HailoRTDefaults::get_network_parameters();
+        auto network_name = HailoRTDefaults::get_network_name(net_group_name);
+        results.emplace(std::make_pair(network_name, params));
+    }
+
+    return results;
+}
+
+// Public API: normalizes 'net_group_name' (which may be a network name) to
+// the owning group's name, then delegates to the impl.
+Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::create_stream_parameters_by_name_mipi_input(
+    const std::string &net_group_name, hailo_stream_interface_t output_interface,
+    const hailo_mipi_input_stream_params_t &mipi_params)
+{
+    auto network_group_name_pair = pimpl->get_network_group_and_network_name(net_group_name);
+    CHECK_EXPECTED(network_group_name_pair);
+    auto net_group_name_str = network_group_name_pair->first;
+
+    return pimpl->create_stream_parameters_by_name_mipi_input(net_group_name_str, output_interface, mipi_params);
+}
+
+// Builds stream params for the group with inputs forced to the MIPI
+// interface (carrying the caller's MIPI params) and outputs using the given
+// output interface's defaults.
+Expected<std::map<std::string, hailo_stream_parameters_t>> Hef::Impl::create_stream_parameters_by_name_mipi_input(
+    const std::string &net_group_name, hailo_stream_interface_t output_interface,
+    const hailo_mipi_input_stream_params_t &mipi_params)
+{
+    auto core_op_metadata = get_core_op_metadata(net_group_name);
+    CHECK_EXPECTED(core_op_metadata);
+
+    std::map<std::string, hailo_stream_parameters_t> results;
+    for (auto &input_layer : core_op_metadata->get_input_layer_infos()) {
+        hailo_stream_parameters_t params = {};
+        params.direction = HAILO_H2D_STREAM;
+        params.stream_interface = HAILO_STREAM_INTERFACE_MIPI;
+        params.mipi_input_params = mipi_params;
+        results.emplace(std::make_pair(input_layer.name, params));
+    }
+    for (auto &output_layer : core_op_metadata->get_output_layer_infos()) {
+        auto params = HailoRTDefaults::get_stream_parameters(output_interface, HAILO_D2H_STREAM);
+        CHECK_EXPECTED(params);
+        results.emplace(std::make_pair(output_layer.name, params.release()));
+    }
+
+    return results;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hef_internal.hpp
+ * @brief Internal definition of Hef class Impl
+ **/
+
+#ifndef _HEF_INTERNAL_HPP_
+#define _HEF_INTERNAL_HPP_
+
+// https://github.com/protocolbuffers/protobuf/tree/master/cmake#notes-on-compiler-warnings
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4244 4267 4127)
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#include "hef.pb.h"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/network_group.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "hef/core_op_metadata.hpp"
+#include "hef/layer_info.hpp"
+#include "hef/context_switch_actions.hpp"
+#include "net_flow/ops/op.hpp"
+#include "net_flow/pipeline/pipeline.hpp"
+#include "core_op/core_op.hpp"
+#include "device_common/control_protocol.hpp"
+
+#include "control_protocol.h"
+#include <functional>
+#include <bitset>
+#include <memory>
+
+extern "C" {
+#include "md5.h"
+}
+
+
+namespace hailort
+{
+
+class CoreOpMetadata;
+class CoreOp;
+using ProtoHEFNetworkGroupPtr = std::shared_ptr<ProtoHEFNetworkGroup>;
+
+struct ProtoHEFCoreOpMock;
+// A partial core-op variant together with the physical cluster layout it targets.
+// Note: 'layout' is a reference into the parsed protobuf message, so instances must
+// not outlive the Hef::Impl that owns the proto data.
+struct ProtoHEFPartialCoreOpMock {
+    ProtoHEFPartialCoreOpMock(std::shared_ptr<ProtoHEFCoreOpMock> core_op, const ProtoHEFPhysicalLayout &layout)
+        : core_op(core_op)
+        , layout(layout)
+    {}
+
+    ProtoHEFPartialCoreOpMock(const ProtoHEFPartialCoreOpMock &partial_core_op)
+        : core_op(partial_core_op.core_op)
+        , layout(partial_core_op.layout)
+    {}
+
+    std::shared_ptr<ProtoHEFCoreOpMock> core_op;
+    const ProtoHEFPhysicalLayout &layout;
+};
+
+// A lightweight view over the protobuf fields that describe a single core-op.
+// All reference members point into the protobuf messages owned by Hef::Impl;
+// instances are only valid while that Impl (and its parsed HEF) is alive.
+struct ProtoHEFCoreOpMock {
+    ProtoHEFCoreOpMock(
+        const ProtoHEFNetworkGroupMetadata &network_group_metadata,
+        const ProtoHEFPreliminaryConfig &preliminary_config,
+        const google::protobuf::RepeatedPtrField<ProtoHEFContext> &contexts,
+        const google::protobuf::RepeatedPtrField<std::string> &sorted_outputs_order,
+        const ProtoHEFFusedLayersMetadata &fused_layers_metadata,
+        const google::protobuf::RepeatedPtrField<std::string> &networks_names,
+        const std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> &partial_core_ops)
+    : network_group_metadata(network_group_metadata),
+      preliminary_config(preliminary_config),
+      contexts(contexts),
+      sorted_outputs_order(sorted_outputs_order),
+      fused_layers_metadata(fused_layers_metadata),
+      networks_names(networks_names),
+      partial_core_ops(partial_core_ops)
+    {}
+
+    // Copy ctor rebinds the references to the same underlying proto messages
+    // (shallow copy by design; only partial_core_ops is copied by value).
+    ProtoHEFCoreOpMock(const ProtoHEFCoreOpMock &core_op)
+    : network_group_metadata(core_op.network_group_metadata),
+      preliminary_config(core_op.preliminary_config),
+      contexts(core_op.contexts),
+      sorted_outputs_order(core_op.sorted_outputs_order),
+      fused_layers_metadata(core_op.fused_layers_metadata),
+      networks_names(core_op.networks_names),
+      partial_core_ops(core_op.partial_core_ops)
+    {}
+
+    const ProtoHEFNetworkGroupMetadata &network_group_metadata;
+    const ProtoHEFPreliminaryConfig &preliminary_config;
+    const google::protobuf::RepeatedPtrField<ProtoHEFContext> &contexts;
+    const google::protobuf::RepeatedPtrField<std::string> &sorted_outputs_order;
+    const ProtoHEFFusedLayersMetadata &fused_layers_metadata;
+    const google::protobuf::RepeatedPtrField<std::string> &networks_names;
+    std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
+};
+
+#pragma pack(push, 1)
+// On-disk HEF file header (packed), located before the serialized protobuf payload.
+// Presumably validated against Impl::HEADER_MAGIC / HEADER_VERSION by
+// Impl::validate_hef_header() — confirm there.
+typedef struct {
+    uint32_t magic;
+    uint32_t version;
+    uint32_t hef_proto_length;  // byte length of the protobuf payload that follows
+    uint32_t reserved;
+    MD5_SUM_t expected_md5;     // MD5 of the payload, compared to the computed sum on load
+} hef__header_t;
+#pragma pack(pop)
+
+// Formatter type of a network I/O as encoded in the HEF.
+typedef enum {
+    HEF__FORMAT__TF_RGB = 0,
+    HEF__FORMAT__FRAMES,
+    HEF__FORMAT__FLAT,
+    HEF__FORMAT__FCR,
+    HEF__FORMAT__BAYER_RGB,
+    HEF__FORMAT__ARGMAX,
+    HEF__FORMAT__NMS,
+    HEF__FORMAT__F8CR,
+} HEF__net_io_formatter_type_t;
+
+// A single post-processing ("net flow") operation parsed from the HEF,
+// to be executed by the host after inference (e.g. NMS).
+struct NetFlowElement
+{
+    std::string name;
+    std::shared_ptr<net_flow::Op> op;
+    std::set<std::string> input_streams;  // names of the streams feeding this op
+    hailo_nms_info_t nms_info;
+};
+
+// Capacity of the bitset used to track which proto extensions a HEF requires.
+const static uint32_t SUPPORTED_EXTENSIONS_BITSET_SIZE = 1000;
+// Proto extensions this HailoRT build can handle; used by
+// Impl::validate_hef_extensions() when checking a loaded HEF.
+static const std::vector<ProtoHEFExtensionType> SUPPORTED_EXTENSIONS = {
+    ABBALE,
+    POSTED_WRITES,
+    DDR,
+    PADDED_DDR_BUFFERS,
+    IS_MULTI_CONTEXTS,
+    COMPRESSED_PARAMS,
+    TRANSPOSE_COMPONENT,
+    IS_NMS_MULTI_CONTEXT,
+    OFFLOAD_ARGMAX,
+    KO_RUN_ASAP,
+    HAILO_NET_FLOW,
+    HAILO_NET_FLOW_YOLO_NMS // Extension added in platform 4.12 release
+};
+
+// True for a host-to-device boundary edge layer of type INFO.
+static inline bool is_h2d_boundary_info_layer(const ProtoHEFEdgeLayer& layer)
+{
+    return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__HOST_TO_DEVICE == layer.direction()) &&
+        (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+            layer.context_switch_info().edge_connection_type()) &&
+        (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()));
+}
+
+// True for a device-to-host boundary edge layer of type INFO.
+static inline bool is_d2h_boundary_info_layer(const ProtoHEFEdgeLayer& layer)
+{
+    return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) &&
+        (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+            layer.context_switch_info().edge_connection_type()) &&
+        (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()));
+}
+
+// True for a host-to-device boundary edge layer of type MUX.
+static inline bool is_h2d_boundary_mux_layer(const ProtoHEFEdgeLayer& layer)
+{
+    return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__HOST_TO_DEVICE == layer.direction()) &&
+        (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+            layer.context_switch_info().edge_connection_type()) &&
+        (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()));
+}
+
+// True for a device-to-host boundary edge layer of type MUX.
+static inline bool is_d2h_boundary_mux_layer(const ProtoHEFEdgeLayer& layer)
+{
+    return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) &&
+        (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+            layer.context_switch_info().edge_connection_type()) &&
+        (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()));
+}
+
+// TODO: Fix the circular dependency (with HRT-2899, InputStream/OutputStream related code will move elsewhere)
+class InputStreamBase;
+class OutputStreamBase;
+
+// Forward declarations
+struct WriteMemoryInfo;
+class Device;
+class VdmaConfigCoreOp;
+class VdmaDevice;
+class HailoRTDriver;
+
+
+// Private implementation of Hef: parses a HEF file/buffer (protobuf + packed
+// header), validates it (magic, version, MD5, supported extensions) and exposes
+// queries over its network groups, core-ops, streams and vstreams.
+class Hef::Impl final
+{
+public:
+    // Expected values for hef__header_t::magic / ::version.
+    static const uint32_t HEADER_MAGIC = 0x01484546;
+    static const uint32_t HEADER_VERSION = 0;
+
+    static Expected<Impl> create(const std::string &hef_path);
+    static Expected<Impl> create(const MemoryView &hef_buffer);
+
+    const std::vector<ProtoHEFNetworkGroupPtr>& network_groups() const;
+    const std::vector<ProtoHEFCoreOpMock>& core_ops(const std::string &net_group_name) const;
+    const std::vector<std::shared_ptr<NetFlowElement>> post_process_ops(const std::string &net_group_name) const;
+
+    Expected<std::pair<std::string, std::string>> get_network_group_and_network_name(const std::string &name);
+
+    // Note: an empty net_group_name (default) selects the default network group.
+    Expected<std::shared_ptr<ProtoHEFCoreOpMock>> get_core_op_by_net_group_name(const std::string &net_group_name="");
+    Expected<std::vector<hailo_network_info_t>> get_network_infos(const std::string &net_group_name="");
+
+    Expected<std::vector<hailo_stream_info_t>> get_input_stream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<std::vector<hailo_stream_info_t>> get_output_stream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<hailo_stream_info_t> get_stream_info_by_name(const std::string &stream_name,
+        hailo_stream_direction_t stream_direction, const std::string &net_group_name="");
+
+    Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+    Expected<std::vector<std::string>> get_sorted_output_names(const std::string &net_group_name="");
+    Expected<size_t> get_number_of_input_streams(const std::string &net_group_name="");
+    Expected<size_t> get_number_of_output_streams(const std::string &net_group_name="");
+    ProtoHEFHwArch get_device_arch();
+    Expected<float64_t> get_bottleneck_fps(const std::string &net_group_name="");
+    static bool contains_ddr_layers(const ProtoHEFCoreOpMock &core_op);
+    static hailo_status validate_core_op_unique_layer_names(const ProtoHEFCoreOpMock &core_op);
+    Expected<std::vector<hailo_vstream_info_t>> get_network_input_vstream_infos(const std::string &net_group_name="",
+        const std::string &network_name="");
+
+    // Stream <-> vstream name mappings.
+    Expected<std::vector<std::string>> get_stream_names_from_vstream_name(const std::string &vstream_name,
+        const std::string &net_group_name="");
+    Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name,
+        const std::string &net_group_name="");
+
+    Expected<std::string> get_vstream_name_from_original_name(const std::string &original_name,
+        const std::string &net_group_name="");
+    Expected<std::vector<std::string>> get_original_names_from_vstream_name(const std::string &stream_name,
+        const std::string &net_group_name="");
+
+    std::vector<std::string> get_network_groups_names();
+    Expected<std::vector<hailo_network_group_info_t>> get_network_groups_infos();
+
+    Expected<ConfigureNetworkParams> create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_group_name);
+    Expected<ConfigureNetworkParams> create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
+        const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_group_name);
+
+    static Expected<std::vector<WriteMemoryInfo>> create_single_context_core_op_config(
+        const ProtoHEFPreliminaryConfig& proto_config);
+
+    static Expected<std::shared_ptr<ProtoHEFCoreOpMock>> get_core_op_per_arch(const ProtoHEFCoreOpMock &core_op,
+        ProtoHEFHwArch hef_arch, hailo_device_architecture_t device_arch, uint32_t partial_clusters_layout_bitmap);
+
+    Expected<std::map<std::string, hailo_stream_parameters_t>> create_stream_parameters_by_name(
+        const std::string &net_group_name, hailo_stream_interface_t stream_interface);
+
+    Expected<std::map<std::string, hailo_network_parameters_t>> create_network_parameters_by_name(
+        const std::string &net_group_name);
+
+    Expected<std::map<std::string,hailo_stream_parameters_t>> create_stream_parameters_by_name_mipi_input(
+        const std::string &net_group_name, hailo_stream_interface_t output_interface,
+        const hailo_mipi_input_stream_params_t &mipi_params);
+
+    Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
+        const std::string &net_group_name, const std::string &network_name, bool quantized,
+        hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+    hailo_status fill_missing_input_vstream_params_with_default(const std::string &net_group_name,
+        const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+    Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
+        const std::string &net_group_name, const std::string &network_name, bool quantized,
+        hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+    hailo_status fill_missing_output_vstream_params_with_default(const std::string &net_group_name,
+        const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &output_vstream_params,
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+    static hailo_status fill_missing_vstream_params_with_default(std::map<std::string, hailo_vstream_params_t> &vstream_params,
+        std::vector<hailo_vstream_info_t> &name_to_format_info, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
+        uint32_t queue_size);
+    // Also adds information to CoreOpMetadata
+    // TODO: When supporting multiple core ops in same netflow - Change metadata param to a map of core_ops_metadata.
+    Expected<std::vector<std::shared_ptr<NetFlowElement>>> create_net_flow_ops(const ProtoHEFNetworkGroup &network_group_proto,
+        CoreOpMetadata &core_op_metadata) const;
+
+    // TODO: Should return map of NG's core_ops metadata?
+    Expected<CoreOpMetadata> get_core_op_metadata(const std::string &network_group_name, uint32_t partial_clusters_layout_bitmap = PARTIAL_CLUSTERS_LAYOUT_IGNORE);
+
+    Expected<std::string> get_hef_description(bool stream_infos, bool vstream_infos, hailo_device_architecture_t device_arch);
+
+    const MD5_SUM_t &md5() const
+    {
+        return m_md5;
+    }
+
+    // If the user configured a batch size on the network group (and left all
+    // per-network batch sizes at default), propagate it to every network.
+    // Fails if both group-level and network-level batch sizes were set.
+    static hailo_status update_network_batch_size(ConfigureNetworkParams &network_group_config_params)
+    {
+        static_assert(HAILO_DEFAULT_BATCH_SIZE == 0, "Invalid HAILO_DEFAULT_BATCH_SIZE");
+
+        auto single_network_default_batch = (HAILO_DEFAULT_BATCH_SIZE == network_group_config_params.batch_size);
+        auto multi_network_default_batch = true;
+        /* Batch size override logic - if the user modified the network group batch size
+           but not the per-network batch sizes, the group value is propagated below. */
+
+        for (auto const &network_params : network_group_config_params.network_params_by_name) {
+            if (HAILO_DEFAULT_BATCH_SIZE != network_params.second.batch_size) {
+                multi_network_default_batch = false;
+            }
+        }
+
+        CHECK((single_network_default_batch || multi_network_default_batch), HAILO_INVALID_OPERATION,
+            "User provided batch size for network group and for network as well. User is adviced to work with network's batch size only");
+
+        if (!single_network_default_batch && multi_network_default_batch) {
+            /* In case user works with network group, override the network batch size.*/
+            for (auto &network_params : network_group_config_params.network_params_by_name) {
+                network_params.second.batch_size = network_group_config_params.batch_size;
+            }
+        }
+
+        return HAILO_SUCCESS;
+    }
+
+    // TODO: HRT-8875 - Change to validate all core ops under same ng or use this func in the new configure API and use this to validate each op when configured.
+    hailo_status validate_boundary_streams_were_created(const std::string &network_group_name, std::shared_ptr<CoreOp> core_op);
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+    const MemoryView get_hef_memview();
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+private:
+    Impl(const std::string &hef_path, hailo_status &status);
+    Impl(const MemoryView &hef_memview, hailo_status &status);
+
+    hailo_status parse_hef_file(const std::string &hef_path);
+    hailo_status parse_hef_memview(const MemoryView &hef_memview);
+    hailo_status transfer_protobuf_field_ownership(ProtoHEFHef &hef_message);
+    void fill_core_ops();
+    hailo_status fill_networks_metadata();
+    void fill_extensions_bitset();
+    void init_md5(MD5_SUM_t &calculated_md5);
+
+    static bool check_hef_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
+        const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features);
+    // Note: If the network group is found, i.e has_value() is true on the returned object, then the underlying pointer is not null
+    static bool check_hef_optional_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
+        const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions);
+    static SupportedFeatures get_supported_features(const ProtoHEFHeader &header,
+        const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features,
+        const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions);
+
+    hailo_status validate_hef_extensions();
+    static hailo_status validate_hef_header(const hef__header_t &header, MD5_SUM_t &calculated_md5, size_t proto_size);
+
+    Expected<std::map<std::string, hailo_format_t>> get_inputs_vstream_names_and_format_info(
+        const std::string &net_group_name, const std::string &network_name);
+    Expected<std::map<std::string, hailo_format_t>> get_outputs_vstream_names_and_format_info(
+        const std::string &net_group_name, const std::string &network_name);
+
+    static Expected<std::string> get_vstream_name_from_original_name_mux(const std::string &original_name, const ProtoHefEdge &layer);
+    static Expected<std::vector<std::string>> get_original_names_from_vstream_name_mux(const std::string &vstream_name, const ProtoHefEdge &layer);
+
+    Expected<CoreOpMetadata> create_metadata_per_arch(const ProtoHEFCoreOpMock &core_op);
+    Expected<std::vector<std::string>> get_stream_infos_description(const std::string &network_group_name, const std::string &network_name);
+    Expected<std::vector<std::string>> get_vstream_infos_description(const std::string &network_group_name, const std::string &network_name);
+    Expected<std::vector<std::string>> get_post_processes_infos_description(const std::string &network_group_name);
+
+    // Hef information
+    ProtoHEFHeader m_header;
+    ProtoHEFIncludedFeatures m_included_features;
+    SupportedFeatures m_supported_features;
+    std::vector<ProtoHEFNetworkGroupPtr> m_groups;
+    std::map<std::string, std::vector<ProtoHEFCoreOpMock>> m_core_ops_per_group;
+    std::map<std::string, std::vector<std::shared_ptr<NetFlowElement>>> m_post_process_ops_per_group;
+    std::vector<ProtoHEFExtension> m_hef_extensions;
+    std::vector<ProtoHEFOptionalExtension> m_hef_optional_extensions;
+    std::bitset<SUPPORTED_EXTENSIONS_BITSET_SIZE> m_supported_extensions_bitset;
+    MD5_SUM_t m_md5;
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+    Buffer m_hef_buffer;
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+    // CoreOps information - TODO: Should be a map of map, mapping network_groups to it's core ops (second map is mapping core op name to its metadata).
+    std::map<std::string, CoreOpMetadataPerArch> m_core_op_per_arch;
+};
+
+// TODO: Make this part of a namespace? (HRT-2881)
+/* TODO: Create LayerInfo for all layers in the HEF (including inter-context and DDR), and use it for parsing additional info without proto dependency
+ After this will be done, this class should move to layer_info.hpp */
+// Stateless helpers for deriving device nn-stream configuration
+// (CONTROL_PROTOCOL__nn_stream_config_t) from HEF edge-layer descriptions.
+class HefConfigurator final
+{
+public:
+    HefConfigurator() = delete;
+
+    static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(const ProtoHEFEdgeLayerBase &edge_layer,
+        bool hw_padding_supported, const ProtoHEFEdgeConnectionType &edge_connection_type);
+    static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(const LayerInfo &edge_layer,
+        bool hw_padding_supported);
+
+    static bool is_hw_padding_supported(const ProtoHEFEdgeLayer &edge_layer);
+    static bool is_hw_padding_supported(const LayerInfo &layer_info);
+private:
+    // Common implementation shared by the proto- and LayerInfo-based overloads above.
+    static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(hailo_format_order_t format_order,
+        uint32_t width, uint32_t features, uint32_t hw_data_bytes, uint16_t core_buffers_per_frame,
+        uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr);
+
+    static bool is_hw_padding_supported(bool is_boundary, bool is_mux, hailo_format_order_t format_order,
+        uint16_t core_buffers_per_frame, uint32_t height, uint32_t width, uint32_t features, uint32_t hw_data_bytes);
+};
+
+// Stateless helpers for extracting layer / context / network metadata from a
+// parsed HEF core-op (boundary, inter-context and DDR layers, NMS info, names).
+class HefUtils final
+{
+public:
+    HefUtils() = delete;
+
+    static hailo_status fill_boundary_layers_info(
+        const ProtoHEFCoreOpMock &core_op,
+        const uint8_t context_index,
+        const ProtoHEFEdgeLayer &layer,
+        const SupportedFeatures &supported_features,
+        ContextMetadata &context_metadata);
+    static Expected<LayerInfo> get_inter_context_layer_info(
+        const ProtoHEFCoreOpMock &core_op, const uint8_t context_index,
+        const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
+    static hailo_status fill_inter_context_layers_info(
+        const ProtoHEFCoreOpMock &core_op,
+        const uint8_t context_index,
+        const ProtoHEFEdgeLayer &layer,
+        const SupportedFeatures &supported_features,
+        ContextMetadata &context_metadata);
+    static Expected<LayerInfo> get_ddr_layer_info(
+        const ProtoHEFCoreOpMock &core_op, const uint8_t context_index,
+        const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
+    static hailo_status fill_ddr_layers_info(
+        const ProtoHEFCoreOpMock &core_op,
+        const uint8_t context_index,
+        const ProtoHEFEdgeLayer &layer,
+        const SupportedFeatures &supported_features,
+        ContextMetadata &context_metadata);
+    // Validates that every DDR input layer in a context has a matching output layer.
+    static hailo_status check_ddr_pairs_match(
+        const std::vector<LayerInfo> &context_ddr_input_layers,
+        const std::vector<LayerInfo> &context_ddr_output_layers,
+        const uint8_t context_index);
+    static Expected<ContextMetadata> parse_preliminary_context(const ProtoHEFPreliminaryConfig &preliminary_proto,
+        const SupportedFeatures &supported_features);
+    static Expected<ContextMetadata> parse_single_dynamic_context(const ProtoHEFCoreOpMock &core_op,
+        const ProtoHEFContext &context_proto, uint8_t context_index, const SupportedFeatures &supported_features);
+    static Expected<std::vector<ContextMetadata>> parse_dynamic_contexts(const ProtoHEFCoreOpMock &core_op,
+        const SupportedFeatures &supported_features);
+    static Expected<hailo_nms_info_t> parse_proto_nms_info(const ProtoHEFNmsInfo &proto_nms_info);
+    static Expected<LayerInfo> get_boundary_layer_info(const ProtoHEFCoreOpMock &core_op,
+        const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
+    static Expected<std::vector<std::string>> get_sorted_output_names(const ProtoHEFCoreOpMock &core_op);
+
+    static Expected<std::string> get_partial_network_name_by_index(const ProtoHEFCoreOpMock &core_op, uint8_t network_index, const SupportedFeatures &supported_features);
+
+    static Expected<std::vector<hailo_network_info_t>> get_network_infos(const ProtoHEFNetworkGroup &net_group,
+        const std::string &net_group_name, const SupportedFeatures &supported_features);
+
+    // Name helpers: full network names are derived from the group name plus a partial name.
+    static std::string get_network_group_name(const ProtoHEFNetworkGroup &net_group, const SupportedFeatures &supported_features);
+    static std::string get_network_name(const ProtoHEFCoreOpMock &core_op, const std::string &partial_network_name);
+    static std::string get_network_name(const std::string &net_group_name, const std::string &partial_network_name);
+
+private:
+    static hailo_status fill_layer_info_with_base_info(const ProtoHEFEdgeLayerBase &base_info,
+        const ProtoHEFEdgeConnectionType &edge_connection_type,
+        const ProtoHEFNetworkGroupMetadata &network_group_proto, bool hw_padding_supported, bool transposed,
+        const uint8_t context_index, const uint8_t network_index, LayerInfo &layer_info);
+    static hailo_status fill_layer_info(const ProtoHEFEdgeLayerInfo &info,
+        const ProtoHEFEdgeConnectionType &edge_connection_type,
+        const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
+        bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
+        uint8_t network_index, LayerInfo &layer_info);
+    static hailo_status fill_fused_nms_info(const ProtoHEFEdgeLayerFused &info,
+        LayerInfo &layer_info, hailo_quant_info_t &defuse_quant_info, const std::string &network_name);
+    static hailo_status fill_mux_info(const ProtoHEFEdgeLayerMux &info,
+        const ProtoHEFEdgeConnectionType &edge_connection_type,
+        const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
+        bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
+        uint8_t network_index, LayerInfo &layer_info);
+};
+
+} /* namespace hailort */
+
+#endif /* _HEF_INTERNAL_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file layer_info.hpp
+ * @brief Layer information structures (LayerInfo) and conversion utilities (LayerInfoUtils)
+ **/
+
+#ifndef _HAILO_LAYER_INFO_HPP_
+#define _HAILO_LAYER_INFO_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/hailort_common.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "os/hailort_driver.hpp"
+
+#include "control_protocol.h"
+#include <vector>
+#include <memory>
+#include <map>
+
+
+namespace hailort
+{
+
+#define INVALID_PAD_INDEX (UINT32_MAX)
+
+// Classification of a parsed layer within the context-switch model.
+enum class LayerType
+{
+    NOT_SET = 0,
+    BOUNDARY = 1,       // host-visible input/output layer
+    INTER_CONTEXT = 2,  // data passed between contexts
+    DDR = 3,            // buffered through device DDR
+    CFG = 4             // configuration channel
+};
+
+// Simulation-related buffer placement indices for a layer.
+struct BufferIndices {
+    uint32_t index;
+    uint32_t cluster_index;
+};
+
+// Identifies the peer context/engine/stream a non-boundary layer connects to.
+struct ConnectedContextInfo {
+    uint8_t context_index;
+    uint8_t dma_engine_index;
+    uint8_t stream_index;
+};
+
+// Extra sizing info for DDR-buffered layers.
+struct DdrInfo {
+    // total_buffers_per_frame not same as core_buffer_per frame.
+    //(In DDR core buffer per frame is 1). Used to calc total host descriptors_per_frame.
+    uint16_t total_buffers_per_frame;
+    uint16_t min_buffered_rows;
+};
+
+
+// Aggregated description of a single layer parsed from the HEF: identity,
+// device stream configuration, shape/format/quantization, and (where relevant)
+// mux/NMS/DDR/context-switch details.
+struct LayerInfo {
+    LayerType type = LayerType::NOT_SET;
+    hailo_stream_direction_t direction;
+    uint8_t stream_index;
+    uint8_t dma_engine_index;
+    std::string name;
+    std::string network_name;
+    uint8_t network_index;
+    CONTROL_PROTOCOL__nn_stream_config_t nn_stream_config;
+    uint32_t max_shmifo_size;
+    uint8_t context_index;
+    uint32_t pad_index = INVALID_PAD_INDEX;
+
+    // Transformation and shape info
+    hailo_3d_image_shape_t shape;     // user-facing shape
+    hailo_3d_image_shape_t hw_shape;  // shape as laid out in device memory
+    uint32_t hw_data_bytes;
+    hailo_format_t format;
+    hailo_quant_info_t quant_info;
+    hailo_nms_info_t nms_info;
+
+    // Mux info (valid only when is_mux is true)
+    bool is_mux;
+    std::vector<LayerInfo> predecessor;
+    uint32_t height_gcd;
+    std::vector<uint32_t> height_ratios;
+
+    // Defused nms info (valid only when is_defused_nms is true)
+    bool is_defused_nms;
+    // TODO HRT-4441 change fused_layer from vector.
+    std::vector<LayerInfo> fused_nms_layer;
+
+    // Simulation Info
+    BufferIndices buffer_indices;
+
+    // Context switch info TODO: we should use std::optional for this structures (or implement our self).
+    ConnectedContextInfo connected_context_info;
+    DdrInfo ddr_info;
+};
+
+// LayerIdentifier = <LayerType, layer_name, stream_index>
+using LayerIdentifier = std::tuple<LayerType, std::string, uint8_t>;
+
+// Extracts the identifying triple from a full LayerInfo.
+inline LayerIdentifier to_layer_identifier(const LayerInfo &info)
+{
+    return std::make_tuple(info.type, info.name, info.stream_index);
+}
+
+// Static helpers converting internal LayerInfo into the public C-API info structs.
+class LayerInfoUtils {
+public:
+    // Builds a hailo_stream_info_t from a parsed layer. For NMS-ordered layers the
+    // frame size comes from the NMS metadata; otherwise it is derived from hw_shape.
+    static hailo_stream_info_t get_stream_info_from_layer_info(const LayerInfo &layer_info)
+    {
+        hailo_stream_info_t res = {};
+        res.hw_data_bytes = layer_info.hw_data_bytes;
+        res.format = layer_info.format;
+        if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
+            res.nms_info = layer_info.nms_info;
+            res.hw_frame_size =
+                HailoRTCommon::get_nms_hw_frame_size(res.nms_info);
+        } else {
+            res.shape.height = layer_info.shape.height;
+            res.shape.width = layer_info.shape.width;
+            res.shape.features = layer_info.shape.features;
+            res.hw_shape.height = layer_info.hw_shape.height;
+            res.hw_shape.width = layer_info.hw_shape.width;
+            res.hw_shape.features = layer_info.hw_shape.features;
+            res.hw_frame_size =
+                res.hw_shape.height * res.hw_shape.width * res.hw_shape.features * res.hw_data_bytes;
+        }
+        res.direction = layer_info.direction;
+        res.index = layer_info.stream_index;
+        // Length asserted above the copy, so the name plus terminator fits in res.name.
+        assert(layer_info.name.length() < HAILO_MAX_NAME_SIZE);
+        strncpy(res.name, layer_info.name.c_str(), layer_info.name.length() + 1);
+        res.quant_info = layer_info.quant_info;
+        res.is_mux = layer_info.is_mux;
+
+        return res;
+    }
+
+    // True if 'vec' already contains a vstream info whose name equals 'name'.
+    static bool vstream_info_already_in_vector(const std::vector<hailo_vstream_info_t> &vec, const std::string &name)
+    {
+        for (const auto &info : vec) {
+            if (name == info.name) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Expands a layer into its user-visible vstream infos:
+    // mux layers recurse into predecessors, defused NMS layers emit each unique
+    // fused layer once, and plain layers map 1:1.
+    static std::vector<hailo_vstream_info_t> get_vstream_infos_from_layer_info(const LayerInfo &layer_info)
+    {
+        std::vector<hailo_vstream_info_t> res = {};
+        if (layer_info.is_mux) {
+            for (auto &pred : layer_info.predecessor) {
+                auto vstream_infos = get_vstream_infos_from_layer_info(pred);
+                res.insert(res.end(), vstream_infos.begin(), vstream_infos.end());
+            }
+        } else if (layer_info.is_defused_nms) {
+            for (auto &fused_nms : layer_info.fused_nms_layer) {
+                // In case of fused nms layers, several LayerInfos will contain data about the same fused layer
+                if (!vstream_info_already_in_vector(res, fused_nms.name)) {
+                    auto vstream_info = get_vstream_info_from_layer_info_impl(fused_nms);
+                    res.push_back(vstream_info);
+                }
+            }
+        } else {
+            auto vstream_info = get_vstream_info_from_layer_info_impl(layer_info);
+            res.push_back(vstream_info);
+        }
+
+        return res;
+    }
+
+    // Bytes transferred per frame for this layer, based on the nn-stream config.
+    static Expected<size_t> get_transfer_size(const LayerInfo &layer_info) {
+        switch (layer_info.type) {
+        case LayerType::BOUNDARY:
+        case LayerType::INTER_CONTEXT:
+            return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.nn_stream_config.periph_buffers_per_frame;
+        case LayerType::DDR:
+            return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.ddr_info.total_buffers_per_frame;
+        default:
+            return make_unexpected(HAILO_NOT_IMPLEMENTED);
+        }
+    }
+
+private:
+    // Converts a single (non-mux) layer into a hailo_vstream_info_t, applying the
+    // default host format order and shape/NMS-shape mapping.
+    static hailo_vstream_info_t get_vstream_info_from_layer_info_impl(const LayerInfo &layer_info)
+    {
+        hailo_vstream_info_t res = {};
+        res.format.type = layer_info.format.type;
+        res.format.flags = layer_info.format.flags;
+        res.format.order = HailoRTDefaults::get_default_host_format_order(layer_info.format);
+        if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
+            res.nms_shape.max_bboxes_per_class = layer_info.nms_info.max_bboxes_per_class * layer_info.nms_info.chunks_per_frame;
+            res.nms_shape.number_of_classes = layer_info.nms_info.number_of_classes;
+        } else {
+            res.shape.height = layer_info.shape.height;
+            res.shape.width = layer_info.shape.width;
+            res.shape.features = layer_info.shape.features;
+        }
+        res.direction = layer_info.direction;
+        // NOTE(review): name is bounded by HAILO_MAX_STREAM_NAME_SIZE here but by
+        // HAILO_MAX_NAME_SIZE in get_stream_info_from_layer_info — confirm each
+        // matches the size of the destination char array.
+        assert(layer_info.name.length() < HAILO_MAX_STREAM_NAME_SIZE);
+        strncpy(res.name, layer_info.name.c_str(), layer_info.name.length() + 1);
+        assert(layer_info.network_name.length() < HAILO_MAX_NETWORK_NAME_SIZE);
+        strncpy(res.network_name, layer_info.network_name.c_str(), layer_info.network_name.length() + 1);
+        res.quant_info = layer_info.quant_info;
+
+        return res;
+    }
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_LAYER_INFO_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hef_internal.hpp
- * @brief Internal definition of Hef class Impl
- **/
-
-#ifndef _HEF_INTERNAL_HPP_
-#define _HEF_INTERNAL_HPP_
-
-// https://github.com/protocolbuffers/protobuf/tree/master/cmake#notes-on-compiler-warnings
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable: 4244 4267 4127)
-#else
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-#include "hef.pb.h"
-#if defined(_MSC_VER)
-#pragma warning( pop )
-#else
-#pragma GCC diagnostic pop
-#endif
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/network_group.hpp"
-#include "context_switch/context_switch_actions.hpp"
-#include "layer_info.hpp"
-#include "network_group_metadata.hpp"
-#include "hailort_defaults.hpp"
-#include "control_protocol.h"
-#include "pipeline.hpp"
-
-#include "control_protocol.hpp"
-
-#include <functional>
-#include <bitset>
-#include <memory>
-
-extern "C" {
-#include "md5.h"
-}
-
-namespace hailort
-{
-
-class ResourcesManager;
-class ConfigBuffer;
-using ProtoHEFNetworkGroupPtr = std::shared_ptr<ProtoHEFNetworkGroup>;
-
-struct ProtoHEFCoreOpMock;
-struct ProtoHEFPartialCoreOpMock {
- ProtoHEFPartialCoreOpMock(std::shared_ptr<ProtoHEFCoreOpMock> core_op, const ProtoHEFPhysicalLayout &layout)
- : core_op(core_op)
- , layout(layout)
- {}
-
- ProtoHEFPartialCoreOpMock(const ProtoHEFPartialCoreOpMock &partial_core_op)
- : core_op(partial_core_op.core_op)
- , layout(partial_core_op.layout)
- {}
-
- std::shared_ptr<ProtoHEFCoreOpMock> core_op;
- const ProtoHEFPhysicalLayout &layout;
-};
-
-struct ProtoHEFCoreOpMock {
- ProtoHEFCoreOpMock(
- const ProtoHEFNetworkGroupMetadata &network_group_metadata,
- const ProtoHEFPreliminaryConfig &preliminary_config,
- const google::protobuf::RepeatedPtrField<ProtoHEFContext> &contexts,
- const google::protobuf::RepeatedPtrField<std::string> &sorted_outputs_order,
- const ProtoHEFFusedLayersMetadata &fused_layers_metadata,
- const google::protobuf::RepeatedPtrField<std::string> &networks_names,
- const std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> &partial_core_ops)
- : network_group_metadata(network_group_metadata),
- preliminary_config(preliminary_config),
- contexts(contexts),
- sorted_outputs_order(sorted_outputs_order),
- fused_layers_metadata(fused_layers_metadata),
- networks_names(networks_names),
- partial_core_ops(partial_core_ops)
- {}
-
- ProtoHEFCoreOpMock(const ProtoHEFCoreOpMock &core_op)
- : network_group_metadata(core_op.network_group_metadata),
- preliminary_config(core_op.preliminary_config),
- contexts(core_op.contexts),
- sorted_outputs_order(core_op.sorted_outputs_order),
- fused_layers_metadata(core_op.fused_layers_metadata),
- networks_names(core_op.networks_names),
- partial_core_ops(core_op.partial_core_ops)
- {}
-
- const ProtoHEFNetworkGroupMetadata &network_group_metadata;
- const ProtoHEFPreliminaryConfig &preliminary_config;
- const google::protobuf::RepeatedPtrField<ProtoHEFContext> &contexts;
- const google::protobuf::RepeatedPtrField<std::string> &sorted_outputs_order;
- const ProtoHEFFusedLayersMetadata &fused_layers_metadata;
- const google::protobuf::RepeatedPtrField<std::string> &networks_names;
- std::vector<std::shared_ptr<ProtoHEFPartialCoreOpMock>> partial_core_ops;
-};
-
-#pragma pack(push, 1)
-typedef struct {
- uint32_t magic;
- uint32_t version;
- uint32_t hef_proto_length;
- uint32_t reserved;
- MD5_SUM_t expected_md5;
-} hef__header_t;
-#pragma pack(pop)
-
-typedef enum {
- HEF__FORMAT__TF_RGB = 0,
- HEF__FORMAT__FRAMES,
- HEF__FORMAT__FLAT,
- HEF__FORMAT__FCR,
- HEF__FORMAT__BAYER_RGB,
- HEF__FORMAT__ARGMAX,
- HEF__FORMAT__NMS,
- HEF__FORMAT__F8CR,
-} HEF__net_io_formatter_type_t;
-
-
-const static uint32_t SUPPORTED_EXTENSIONS_BITSET_SIZE = 1000;
-static const std::vector<ProtoHEFExtensionType> SUPPORTED_EXTENSIONS = {
- ABBALE,
- POSTED_WRITES,
- DDR,
- PADDED_DDR_BUFFERS,
- IS_MULTI_CONTEXTS,
- COMPRESSED_PARAMS,
- TRANSPOSE_COMPONENT,
- IS_NMS_MULTI_CONTEXT,
- OFFLOAD_ARGMAX,
- KO_RUN_ASAP,
- HAILO_NET_FLOW,
- HAILO_NET_FLOW_YOLO_NMS // Extention added in platform 4.12 release
-};
-
-static inline bool is_h2d_boundary_info_layer(const ProtoHEFEdgeLayer& layer)
-{
- return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__HOST_TO_DEVICE == layer.direction()) &&
- (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- layer.context_switch_info().edge_connection_type()) &&
- (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()));
-}
-
-static inline bool is_d2h_boundary_info_layer(const ProtoHEFEdgeLayer& layer)
-{
- return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) &&
- (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- layer.context_switch_info().edge_connection_type()) &&
- (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__INFO == layer.edge_layer_type()));
-}
-
-static inline bool is_h2d_boundary_mux_layer(const ProtoHEFEdgeLayer& layer)
-{
- return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__HOST_TO_DEVICE == layer.direction()) &&
- (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- layer.context_switch_info().edge_connection_type()) &&
- (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()));
-}
-
-static inline bool is_d2h_boundary_mux_layer(const ProtoHEFEdgeLayer& layer)
-{
- return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__DEVICE_TO_HOST == layer.direction()) &&
- (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
- layer.context_switch_info().edge_connection_type()) &&
- (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()));
-}
-
-// TODO: Fix the circular dependency (with HRT-2899, InputStream/OutputStream related code will move elsewhere)
-class InputStreamBase;
-class OutputStreamBase;
-
-// Forward declerations
-struct WriteMemoryInfo;
-class Device;
-class VdmaConfigNetworkGroup;
-class VdmaDevice;
-class HailoRTDriver;
-
-
-class Hef::Impl final
-{
-public:
- static const uint32_t HEADER_MAGIC = 0x01484546;
- static const uint32_t HEADER_VERSION = 0;
-
- static Expected<Impl> create(const std::string &hef_path);
- static Expected<Impl> create(const MemoryView &hef_buffer);
-
- const std::vector<ProtoHEFNetworkGroupPtr>& network_groups() const;
- const std::vector<ProtoHEFCoreOpMock>& core_ops(const std::string &net_group_name) const;
- const std::vector<std::shared_ptr<hailort::NetFlowElement>> post_process_ops(const std::string &net_group_name) const;
-
- Expected<std::pair<std::string, std::string>> get_network_group_and_network_name(const std::string &name);
-
- Expected<std::shared_ptr<ProtoHEFCoreOpMock>> get_core_op_by_net_group_name(const std::string &net_group_name="");
- Expected<std::vector<hailo_network_info_t>> get_network_infos(const std::string &net_group_name="");
-
- Expected<std::vector<hailo_stream_info_t>> get_input_stream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<std::vector<hailo_stream_info_t>> get_output_stream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<hailo_stream_info_t> get_stream_info_by_name(const std::string &stream_name,
- hailo_stream_direction_t stream_direction, const std::string &net_group_name="");
-
- Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
- Expected<std::vector<std::string>> get_sorted_output_names(const std::string &net_group_name="");
- Expected<size_t> get_number_of_input_streams(const std::string &net_group_name="");
- Expected<size_t> get_number_of_output_streams(const std::string &net_group_name="");
- ProtoHEFHwArch get_device_arch();
- Expected<float64_t> get_bottleneck_fps(const std::string &net_group_name="");
- static bool contains_ddr_layers(const ProtoHEFCoreOpMock &net_group);
- static hailo_status validate_core_op_unique_layer_names(const ProtoHEFCoreOpMock &core_op);
- Expected<std::vector<hailo_vstream_info_t>> get_network_input_vstream_infos(const std::string &net_group_name="",
- const std::string &network_name="");
-
- Expected<std::vector<std::string>> get_stream_names_from_vstream_name(const std::string &vstream_name,
- const std::string &net_group_name="");
- Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name,
- const std::string &net_group_name="");
-
- Expected<std::string> get_vstream_name_from_original_name(const std::string &original_name,
- const std::string &net_group_name="");
- Expected<std::vector<std::string>> get_original_names_from_vstream_name(const std::string &stream_name,
- const std::string &net_group_name="");
-
- std::vector<std::string> get_network_groups_names();
- Expected<std::vector<hailo_network_group_info_t>> get_network_groups_infos();
-
- Expected<ConfigureNetworkParams> create_configure_params(hailo_stream_interface_t stream_interface, const std::string &network_gorup_name);
- Expected<ConfigureNetworkParams> create_configure_params_mipi_input(hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params, const std::string &network_gorup_name);
-
- static Expected<std::vector<WriteMemoryInfo>> create_single_context_network_group_config(
- const ProtoHEFPreliminaryConfig& proto_config);
-
- static Expected<std::shared_ptr<ProtoHEFCoreOpMock>> get_core_op_per_arch(const ProtoHEFCoreOpMock &base_net_group,
- ProtoHEFHwArch hef_arch, hailo_device_architecture_t device_arch, uint32_t partial_clusters_layout_bitmap);
-
- Expected<std::map<std::string, hailo_stream_parameters_t>> create_stream_parameters_by_name(
- const std::string &net_group_name, hailo_stream_interface_t stream_interface);
-
- Expected<std::map<std::string, hailo_network_parameters_t>> create_network_parameters_by_name(
- const std::string &net_group_name);
-
- Expected<std::map<std::string,hailo_stream_parameters_t>> create_stream_parameters_by_name_mipi_input(
- const std::string &net_group_name, hailo_stream_interface_t output_interface,
- const hailo_mipi_input_stream_params_t &mipi_params);
-
- Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
- const std::string &net_group_name, const std::string &network_name, bool quantized,
- hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
- hailo_status fill_missing_input_vstream_params_with_default(const std::string &net_group_name,
- const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
- Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
- const std::string &net_group_name, const std::string &network_name, bool quantized,
- hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
- hailo_status fill_missing_output_vstream_params_with_default(const std::string &net_group_name,
- const std::string &network_name, std::map<std::string, hailo_vstream_params_t> &output_vstream_params,
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
- static hailo_status fill_missing_vstream_params_with_default(std::map<std::string, hailo_vstream_params_t> &vstream_params,
- std::vector<hailo_vstream_info_t> &name_to_format_info, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms,
- uint32_t queue_size);
- // Also adds information to NetworkGroupMetadata
- Expected<std::vector<std::shared_ptr<NetFlowElement>>> create_network_group_ops(const ProtoHEFNetworkGroup &network_group_proto,
- NetworkGroupMetadata &network_group_meta_data) const;
-
-
- Expected<NetworkGroupMetadata> get_network_group_metadata(const std::string &network_group_name, uint32_t partial_clusters_layout_bitmap = PARTIAL_CLUSTERS_LAYOUT_IGNORE)
- {
- CHECK_AS_EXPECTED(contains(m_network_group_metadata_per_arch, network_group_name), HAILO_NOT_FOUND,
- "Network group with name {} wasn't found", network_group_name);
- auto metadata_per_arch = m_network_group_metadata_per_arch.at(network_group_name);
- auto metadata = metadata_per_arch.get_metadata(partial_clusters_layout_bitmap);
- return metadata;
- }
-
- const MD5_SUM_t &md5() const
- {
- return m_md5;
- }
-
- static hailo_status update_network_batch_size(ConfigureNetworkParams &network_group_config_params)
- {
- static_assert(HAILO_DEFAULT_BATCH_SIZE == 0, "Invalid HAILO_DEFAULT_BATCH_SIZE");
-
- auto single_network_default_batch = (HAILO_DEFAULT_BATCH_SIZE == network_group_config_params.batch_size);
- auto multi_network_default_batch = true;
- /* Batch size overide logic - if user modifies network group batch size
- and not the network batch size, */
-
- for (auto const &network_params : network_group_config_params.network_params_by_name) {
- if (HAILO_DEFAULT_BATCH_SIZE != network_params.second.batch_size) {
- multi_network_default_batch = false;
- }
- }
-
- CHECK((single_network_default_batch || multi_network_default_batch), HAILO_INVALID_OPERATION,
- "User provided batch size for network group and for network as well. User is adviced to work with network's batch size only");
-
- if (!single_network_default_batch && multi_network_default_batch) {
- /* In case user works with network group, overide the network batch size.*/
- for (auto &network_params : network_group_config_params.network_params_by_name) {
- network_params.second.batch_size = network_group_config_params.batch_size;
- }
- }
-
- return HAILO_SUCCESS;
- }
-
- hailo_status validate_boundary_streams_were_created(const std::string &network_group_name, ConfiguredNetworkGroup &network_group)
- {
- auto number_of_inputs = get_number_of_input_streams(network_group_name);
- CHECK_EXPECTED_AS_STATUS(number_of_inputs);
- CHECK((number_of_inputs.value() == network_group.get_input_streams().size()),
- HAILO_INVALID_ARGUMENT, "passed configure_params for network group {} did not contain all input streams", network_group_name);
-
- auto number_of_outputs = get_number_of_output_streams(network_group_name);
- CHECK_EXPECTED_AS_STATUS(number_of_inputs);
- CHECK((number_of_outputs.value() == network_group.get_output_streams().size()),
- HAILO_INVALID_ARGUMENT, "passed configure_params for network group {} did not contain all output streams", network_group_name);
-
- return HAILO_SUCCESS;
- }
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
- const MemoryView get_hef_memview();
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-private:
- Impl(const std::string &hef_path, hailo_status &status);
- Impl(const MemoryView &hef_memview, hailo_status &status);
-
- hailo_status parse_hef_file(const std::string &hef_path);
- hailo_status parse_hef_memview(const MemoryView &hef_memview);
- hailo_status transfer_protobuf_field_ownership(ProtoHEFHef &hef_message);
- void fill_core_ops();
- hailo_status fill_networks_metadata();
- void fill_extensions_bitset();
- void init_md5(MD5_SUM_t &calculated_md5);
-
- static bool check_hef_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
- const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features);
- // Note: If the network group is found, i.e has_value() is true on the returned object, then the underlying pointer is not null
- static bool check_hef_optional_extension(const ProtoHEFExtensionType &extension, const ProtoHEFHeader &header,
- const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions);
- static SupportedFeatures get_supported_features(const ProtoHEFHeader &header,
- const std::vector<ProtoHEFExtension> &hef_extensions, const ProtoHEFIncludedFeatures &included_features,
- const std::vector<ProtoHEFOptionalExtension> &hef_optional_extensions);
-
- hailo_status validate_hef_extensions();
- static hailo_status validate_hef_header(const hef__header_t &header, MD5_SUM_t &calculated_md5, size_t proto_size);
-
- Expected<std::map<std::string, hailo_format_t>> get_inputs_vstream_names_and_format_info(
- const std::string &net_group_name, const std::string &network_name);
- Expected<std::map<std::string, hailo_format_t>> get_outputs_vstream_names_and_format_info(
- const std::string &net_group_name, const std::string &network_name);
-
- static Expected<std::string> get_vstream_name_from_original_name_mux(const std::string &original_name, const ProtoHefEdge &layer);
- static Expected<std::vector<std::string>> get_original_names_from_vstream_name_mux(const std::string &vstream_name, const ProtoHefEdge &layer);
-
- Expected<NetworkGroupMetadata> create_metadata_per_arch(const ProtoHEFCoreOpMock &core_op);
-
- // Hef information
- ProtoHEFHeader m_header;
- ProtoHEFIncludedFeatures m_included_features;
- SupportedFeatures m_supported_features;
- std::vector<ProtoHEFNetworkGroupPtr> m_groups;
- std::map<std::string, std::vector<ProtoHEFCoreOpMock>> m_core_ops_per_group;
- std::map<std::string, std::vector<std::shared_ptr<hailort::NetFlowElement>>> m_post_process_ops_per_group;
- std::vector<ProtoHEFExtension> m_hef_extensions;
- std::vector<ProtoHEFOptionalExtension> m_hef_optional_extensions;
- std::bitset<SUPPORTED_EXTENSIONS_BITSET_SIZE> m_supported_extensions_bitset;
- MD5_SUM_t m_md5;
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
- Buffer m_hef_buffer;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
- // NetworkGroups information
- std::map<std::string, NetworkGroupMetadataPerArch> m_network_group_metadata_per_arch; // TODO: keep meta data per core_op (HRT-8639)
-};
-
-// TODO: Make this part of a namespace? (HRT-2881)
-/* TODO: Create LayerInfo for all layers in the HEF (including inter-context and DDR), and use it for parsing additional info without proto dependency
- After this will be done, this class should move to layer_info.hpp */
-class HefConfigurator final
-{
-public:
- HefConfigurator() = delete;
-
- static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(const ProtoHEFEdgeLayerBase &edge_layer,
- bool hw_padding_supported, const ProtoHEFEdgeConnectionType &edge_connection_type);
- static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(const LayerInfo &edge_layer,
- bool hw_padding_supported);
-
- static bool is_hw_padding_supported(const ProtoHEFEdgeLayer &edge_layer);
- static bool is_hw_padding_supported(const LayerInfo &layer_info);
-private:
- static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(hailo_format_order_t format_order,
- uint32_t width, uint32_t features, uint32_t hw_data_bytes, uint16_t core_buffers_per_frame,
- uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr);
-
- static bool is_hw_padding_supported(bool is_boundary, bool is_mux, hailo_format_order_t format_order,
- uint16_t core_buffers_per_frame, uint32_t height, uint32_t width, uint32_t features, uint32_t hw_data_bytes);
-};
-
-class HefUtils final
-{
-public:
- HefUtils() = delete;
-
- static hailo_status fill_boundary_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata);
- static Expected<LayerInfo> get_inter_context_layer_info(
- const ProtoHEFCoreOpMock &core_op, const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
- static hailo_status fill_inter_context_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata);
- static Expected<LayerInfo> get_ddr_layer_info(
- const ProtoHEFCoreOpMock &core_op, const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
- static hailo_status fill_ddr_layers_info(
- const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index,
- const ProtoHEFEdgeLayer &layer,
- const SupportedFeatures &supported_features,
- ContextMetadata &context_metadata);
- static hailo_status check_ddr_pairs_match(
- const std::vector<LayerInfo> &context_ddr_input_layers,
- const std::vector<LayerInfo> &context_ddr_output_layers,
- const uint8_t context_index);
- static Expected<PreliminaryContextMetadata> parse_preliminary_context(const ProtoHEFPreliminaryConfig &preliminary_proto,
- const SupportedFeatures &supported_features);
- static Expected<ContextMetadata> parse_single_dynamic_context(const ProtoHEFCoreOpMock &core_op,
- const ProtoHEFContext &context_proto, uint8_t context_index, const SupportedFeatures &supported_features);
- static Expected<std::vector<ContextMetadata>> parse_dynamic_contexts(const ProtoHEFCoreOpMock &core_op,
- const SupportedFeatures &supported_features);
- static Expected<hailo_nms_info_t> parse_proto_nms_info(const ProtoHEFNmsInfo &proto_nms_info);
- static Expected<LayerInfo> get_boundary_layer_info(const ProtoHEFCoreOpMock &core_op,
- const uint8_t context_index, const ProtoHEFEdgeLayer &layer, const SupportedFeatures &supported_features);
- static Expected<std::vector<std::string>> get_sorted_output_names(const ProtoHEFCoreOpMock &core_op);
-
- static Expected<std::string> get_partial_network_name_by_index(const ProtoHEFCoreOpMock &core_op, uint8_t network_index, const SupportedFeatures &supported_features);
-
- static Expected<std::vector<hailo_network_info_t>> get_network_infos(const ProtoHEFNetworkGroup &net_group,
- const std::string &net_group_name, const SupportedFeatures &supported_features);
-
- static std::string get_network_group_name(const ProtoHEFNetworkGroup &net_group, const SupportedFeatures &supported_features);
- static std::string get_network_name(const ProtoHEFCoreOpMock &net_group, const std::string &partial_network_name);
- static std::string get_network_name(const std::string &net_group_name, const std::string &partial_network_name);
-
-private:
- static hailo_status fill_layer_info_with_base_info(const ProtoHEFEdgeLayerBase &base_info,
- const ProtoHEFEdgeConnectionType &edge_connection_type,
- const ProtoHEFNetworkGroupMetadata &network_group_proto, bool hw_padding_supported, bool transposed,
- const uint8_t context_index, const uint8_t network_index, LayerInfo &layer_info);
- static hailo_status fill_layer_info(const ProtoHEFEdgeLayerInfo &info,
- const ProtoHEFEdgeConnectionType &edge_connection_type,
- const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
- bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
- uint8_t network_index, LayerInfo &layer_info);
- static hailo_status fill_fused_nms_info(const ProtoHEFEdgeLayerFused &info,
- LayerInfo &layer_info, hailo_quant_info_t &defuse_quant_info, const std::string &network_name);
- static hailo_status fill_mux_info(const ProtoHEFEdgeLayerMux &info,
- const ProtoHEFEdgeConnectionType &edge_connection_type,
- const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
- bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name,
- uint8_t network_index, LayerInfo &layer_info);
-};
-
-} /* namespace hailort */
-
-#endif /* _HEF_INTERNAL_HPP_ */
#define VDMA_CHANNEL_CONTROL_OFFSET (0x00)
#define VDMA_CHANNEL_NUM_AVAIL_OFFSET (0x02)
#define VDMA_CHANNEL_NUM_PROC_OFFSET (0x04)
-#define VDMA_CHANNEL_ERROR_OFFSET (0x08)
#endif /* _HAILO_HW_CONSTS_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file inference_pipeline.cpp
- * @brief Implemention of inference pipeline
- **/
-
-#include "hailo/inference_pipeline.hpp"
-#include "common/async_thread.hpp"
-#include "vstream_internal.hpp"
-#include "hailort_defaults.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-
-#include <sstream>
-
-namespace hailort
-{
-
-InferVStreams::InferVStreams(std::vector<InputVStream> &&inputs, std::vector<OutputVStream> &&outputs, bool is_multi_context,
- uint16_t batch_size) :
- m_inputs(std::move(inputs)),
- m_outputs(std::move(outputs)),
- m_is_multi_context(is_multi_context),
- m_batch_size(batch_size)
-{
- for (auto &input : m_inputs) {
- if (contains(m_network_name_to_input_count, input.network_name())) {
- ++m_network_name_to_input_count[input.network_name()];
- } else {
- m_network_name_to_input_count.emplace(input.network_name(), 1);
- }
- }
- for (auto &output : m_outputs) {
- if (contains(m_network_name_to_output_count, output.network_name())) {
- ++m_network_name_to_output_count[output.network_name()];
- } else {
- m_network_name_to_output_count.emplace(output.network_name(), 1);
- }
- }
-}
-
-hailo_status InferVStreams::verify_network_inputs_and_outputs(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
- const std::map<std::string, MemoryView>& outputs_name_mem_view_map)
-{
- std::map<std::string, std::pair<size_t, size_t>> input_output_count_per_network;
-
- for (const auto &input_name_to_memview : inputs_name_mem_view_map) {
- auto input_vstream = get_input_by_name(input_name_to_memview.first);
- CHECK_EXPECTED_AS_STATUS(input_vstream);
- auto network_name = input_vstream->get().network_name();
- if (contains(input_output_count_per_network, network_name)) {
- ++input_output_count_per_network[network_name].first;
- } else {
- input_output_count_per_network.emplace(network_name, std::pair<size_t, size_t>(1, 0));
- }
- }
- for (const auto &output_name_to_memview : outputs_name_mem_view_map) {
- auto output_vstream = get_output_by_name(output_name_to_memview.first);
- CHECK_EXPECTED_AS_STATUS(output_vstream);
- auto network_name = output_vstream->get().network_name();
- if (contains(input_output_count_per_network, network_name)) {
- ++input_output_count_per_network[network_name].second;
- } else {
- input_output_count_per_network.emplace(network_name, std::pair<size_t, size_t>(0, 1));
- }
- }
- CHECK(!m_is_multi_context || (input_output_count_per_network.size() == m_network_name_to_input_count.size()), HAILO_INVALID_ARGUMENT,
- "For multi-context network groups, inference is only supported on all available networks");
-
- for (const auto &network_to_input_output_count : input_output_count_per_network) {
- CHECK(network_to_input_output_count.second.first == m_network_name_to_input_count[network_to_input_output_count.first],
- HAILO_INVALID_ARGUMENT, "Not all inputs have been provided for network {}", network_to_input_output_count.first);
- CHECK(network_to_input_output_count.second.second == m_network_name_to_output_count[network_to_input_output_count.first],
- HAILO_INVALID_ARGUMENT, "Not all outputs have been provided for network {}", network_to_input_output_count.first);
- }
- return HAILO_SUCCESS;
-}
-
-static hailo_status verify_vstream_params_in_vstream_infos(const std::map<std::string, hailo_vstream_params_t> ¶ms,
- const std::vector<hailo_vstream_info_t> &vstream_infos)
-{
- for (const auto &name_to_param : params) {
- const auto &name = name_to_param.first;
- bool found = false;
- for (const auto &vstream_info : vstream_infos) {
- if (vstream_info.name == name) {
- found = true;
- break;
- }
- }
- CHECK(found, HAILO_NOT_FOUND, "Could not find vstream {}", name);
- }
- return HAILO_SUCCESS;
-}
-
-Expected<InferVStreams> InferVStreams::create(ConfiguredNetworkGroup &net_group,
- const std::map<std::string, hailo_vstream_params_t> &input_params,
- const std::map<std::string, hailo_vstream_params_t> &output_params)
-{
- auto network_infos = net_group.get_network_infos();
- CHECK_EXPECTED(network_infos);
-
- auto is_multi_context = net_group.is_multi_context();
- std::map<std::string, std::pair<size_t, size_t>> input_param_count_per_network;
- size_t total_inputs_found = 0;
- size_t total_outputs_found = 0;
-
- uint16_t batch_size = 0;
- if (is_multi_context) {
- const auto &config_params = net_group.get_config_params();
- batch_size = config_params.batch_size;
-
- if (HAILO_DEFAULT_BATCH_SIZE == batch_size) {
- uint16_t network_batch_size = config_params.network_params_by_name.begin()->second.batch_size;
- for (const auto &name_params_pair : config_params.network_params_by_name) {
- CHECK_AS_EXPECTED(network_batch_size == name_params_pair.second.batch_size, HAILO_INVALID_ARGUMENT,
- "Batch size of each network must be the same!");
- }
-
- batch_size = network_batch_size;
- }
- }
-
- if (HAILO_DEFAULT_BATCH_SIZE == batch_size) {
- batch_size = DEFAULT_ACTUAL_BATCH_SIZE;
- }
-
- for (const auto &network_info : network_infos.value()) {
- auto input_vstream_infos_per_network = net_group.get_input_vstream_infos(network_info.name);
- CHECK_EXPECTED(input_vstream_infos_per_network);
-
- size_t input_counter = 0;
- for (const auto &vstream_info : input_vstream_infos_per_network.value()) {
- if (contains(input_params, std::string(vstream_info.name))) {
- ++input_counter;
- ++total_inputs_found;
- }
- }
-
- auto output_vstream_infos_per_network = net_group.get_output_vstream_infos(network_info.name);
- CHECK_EXPECTED(output_vstream_infos_per_network);
-
- size_t output_counter = 0;
- for (const auto &vstream_info : output_vstream_infos_per_network.value()) {
- if (contains(output_params, std::string(vstream_info.name))) {
- ++output_counter;
- ++total_outputs_found;
- }
- }
-
- if ((0 != input_counter) || (0 != output_counter)) {
- CHECK_AS_EXPECTED(input_counter == input_vstream_infos_per_network->size(), HAILO_INVALID_ARGUMENT,
- "Found only partial inputs for network {}", network_info.name);
- CHECK_AS_EXPECTED(output_counter == output_vstream_infos_per_network->size(), HAILO_INVALID_ARGUMENT,
- "Found only partial outputs for network {}", network_info.name);
- } else {
- CHECK_AS_EXPECTED(!is_multi_context, HAILO_INVALID_ARGUMENT,
- "For multi-context network groups, the pipeline must be created for all available networks");
- }
- }
-
- if (total_inputs_found != input_params.size()) {
- auto all_input_vstream_infos = net_group.get_input_vstream_infos();
- CHECK_EXPECTED(all_input_vstream_infos);
-
- auto status = verify_vstream_params_in_vstream_infos(input_params, all_input_vstream_infos.release());
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
- if (total_outputs_found != output_params.size()) {
- auto all_output_vstream_infos = net_group.get_output_vstream_infos();
- CHECK_EXPECTED(all_output_vstream_infos);
-
- auto status = verify_vstream_params_in_vstream_infos(output_params, all_output_vstream_infos.release());
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- auto input_vstreams = VStreamsBuilder::create_input_vstreams(net_group, input_params);
- CHECK_EXPECTED(input_vstreams);
-
- auto output_vstreams = VStreamsBuilder::create_output_vstreams(net_group, output_params);
- CHECK_EXPECTED(output_vstreams);
-
- return InferVStreams(input_vstreams.release(), output_vstreams.release(), is_multi_context, batch_size);
-}
-
-hailo_status InferVStreams::infer(const std::map<std::string, MemoryView>& input_data,
- std::map<std::string, MemoryView>& output_data, size_t frames_count)
-{
- auto status = verify_network_inputs_and_outputs(input_data, output_data);
- CHECK_SUCCESS(status);
-
- status = verify_memory_view_size(input_data, output_data, frames_count);
- CHECK_SUCCESS(status);
-
- status = verify_frames_count(frames_count);
- CHECK_SUCCESS(status);
-
- std::vector<AsyncThreadPtr<hailo_status>> results;
-
- // Launch async read/writes
- for (auto &input_name_to_data_pair : input_data) {
- auto input_vstream_exp = get_input_by_name(input_name_to_data_pair.first);
- CHECK_EXPECTED_AS_STATUS(input_vstream_exp);
- auto &input_vstream = input_vstream_exp.release().get();
- results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
- [&input_vstream, &input_name_to_data_pair, frames_count]() -> hailo_status {
- const auto &input_buffer = input_name_to_data_pair.second;
- for (uint32_t i = 0; i < frames_count; i++) {
- const size_t offset = i * input_vstream.get_frame_size();
- auto status = input_vstream.write(MemoryView::create_const(
- input_buffer.data() + offset,
- input_vstream.get_frame_size()));
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__DEBUG("Input stream was aborted!");
- return status;
- }
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
- }
- ));
- }
- for (auto &output_name_to_data_pair : output_data) {
- auto output_vstream_exp = get_output_by_name(output_name_to_data_pair.first);
- CHECK_EXPECTED_AS_STATUS(output_vstream_exp);
- auto &output_vstream = output_vstream_exp.release().get();
- results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
- [&output_vstream, &output_name_to_data_pair, frames_count]() {
- for (size_t i = 0; i < frames_count; i++) {
- auto status = output_vstream.read(MemoryView(output_name_to_data_pair.second.data() + i * output_vstream.get_frame_size(), output_vstream.get_frame_size()));
- if (HAILO_SUCCESS != status) {
- return status;
- }
- }
- return HAILO_SUCCESS;
- }
- ));
- }
-
- // Wait for all results
- auto error_status = HAILO_SUCCESS;
- for (auto& result : results) {
- status = result->get();
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- continue;
- }
- if (HAILO_SUCCESS != status) {
- error_status = status;
- LOGGER__ERROR("Failed waiting for threads with status {}", error_status);
- }
- }
- if (HAILO_SUCCESS != error_status) {
- return error_status;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status InferVStreams::verify_memory_view_size(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
- const std::map<std::string, MemoryView>& outputs_name_mem_view_map, size_t frames_count)
-{
- for (const auto &input_name_to_memview : inputs_name_mem_view_map) {
- auto input_vstream_exp = get_input_by_name(input_name_to_memview.first);
- CHECK_EXPECTED_AS_STATUS(input_vstream_exp);
- auto &input_vstream = input_vstream_exp.release().get();
- CHECK(frames_count * input_vstream.get_frame_size() == input_name_to_memview.second.size(), HAILO_INVALID_ARGUMENT,
- "Memory size of vstream {} does not match the frame count! (Expected {}, got {})",
- input_vstream.name(), frames_count * input_vstream.get_frame_size(), input_name_to_memview.second.size());
- }
- for (const auto &output_name_to_memview : outputs_name_mem_view_map) {
- auto output_vstream_exp = get_output_by_name(output_name_to_memview.first);
- CHECK_EXPECTED_AS_STATUS(output_vstream_exp);
- auto &output_vstream = output_vstream_exp.release().get();
- CHECK(frames_count * output_vstream.get_frame_size() == output_name_to_memview.second.size(), HAILO_INVALID_ARGUMENT,
- "Memory size of vstream {} does not match the frame count! (Expected {}, got {})",
- output_vstream.name(), frames_count * output_vstream.get_frame_size(), output_name_to_memview.second.size());
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status InferVStreams::verify_frames_count(size_t frames_count)
-{
- CHECK((!m_is_multi_context) || (frames_count % m_batch_size == 0), HAILO_INVALID_ARGUMENT,
- "Frames count is not a multiplier of the batch size! ({} % {} != 0)", frames_count, m_batch_size);
- return HAILO_SUCCESS;
-}
-
-Expected<std::reference_wrapper<InputVStream>> InferVStreams::get_input_by_name(const std::string &name)
-{
- for (auto &input_vstream : m_inputs) {
- if (input_vstream.name() == name) {
- return std::ref(input_vstream);
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-Expected<std::reference_wrapper<OutputVStream>> InferVStreams::get_output_by_name(const std::string &name)
-{
- for (auto &ouput_vstream : m_outputs) {
- if (ouput_vstream.name() == name) {
- return std::ref(ouput_vstream);
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-std::vector<std::reference_wrapper<InputVStream>> InferVStreams::get_input_vstreams()
-{
- std::vector<std::reference_wrapper<InputVStream>> vsterams_refs;
- for (auto &input_vstream : m_inputs) {
- vsterams_refs.push_back(std::ref(input_vstream));
- }
- return vsterams_refs;
-}
-
-std::vector<std::reference_wrapper<OutputVStream>> InferVStreams::get_output_vstreams()
-{
- std::vector<std::reference_wrapper<OutputVStream>> vsterams_refs;
- for (auto &ouput_vstream : m_outputs) {
- vsterams_refs.push_back(std::ref(ouput_vstream));
- }
- return vsterams_refs;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file inter_context_buffer.cpp
- * @brief Manages inter-context buffer.
- */
-
-#include "context_switch/multi_context/resource_manager.hpp"
-#include "inter_context_buffer.hpp"
-#include "vdma/sg_buffer.hpp"
-#include "vdma/continuous_buffer.hpp"
-
-
-namespace hailort
-{
-
-Expected<InterContextBuffer> InterContextBuffer::create(HailoRTDriver &driver, uint32_t transfer_size,
- uint16_t max_batch_size)
-{
- auto buffer_exp = should_use_ccb(driver) ?
- create_ccb_buffer(driver, transfer_size, max_batch_size) :
- create_sg_buffer(driver, transfer_size, max_batch_size);
- CHECK_EXPECTED(buffer_exp);
- auto buffer_ptr = buffer_exp.release();
-
- size_t acc_offset = 0;
- for (uint16_t i = 0; i < max_batch_size; i++) {
- const auto first_desc_interrupts_domain = VdmaInterruptsDomain::NONE;
- const auto last_desc_interrupts_domain = ((max_batch_size - 1) == i) ?
- VdmaInterruptsDomain::DEVICE : VdmaInterruptsDomain::NONE;
- static const auto BUFFER_NOT_CIRCULAR = false;
- auto desc_count_local = buffer_ptr->program_descriptors(transfer_size, first_desc_interrupts_domain,
- last_desc_interrupts_domain, acc_offset, BUFFER_NOT_CIRCULAR);
- CHECK_EXPECTED(desc_count_local, "Failed to program descs for inter context channels. Given max_batch_size is too big.");
- acc_offset += desc_count_local.value();
- }
-
- return InterContextBuffer(std::move(buffer_ptr), transfer_size, max_batch_size);
-}
-
-hailo_status InterContextBuffer::reprogram(uint16_t batch_size)
-{
- const auto prev_batch_size = m_dynamic_batch_size;
- auto status = set_dynamic_batch_size(batch_size);
- CHECK_SUCCESS(status);
-
- if (prev_batch_size == m_dynamic_batch_size) {
- LOGGER__TRACE("Batch size hasn't changed ({}); nothing to be done.", batch_size);
- return HAILO_SUCCESS;
- }
-
- status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, prev_batch_size,
- VdmaInterruptsDomain::NONE);
- CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the previous batch (size {})",
- prev_batch_size);
- status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, m_dynamic_batch_size,
- VdmaInterruptsDomain::DEVICE);
- CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the current batch (size {})",
- m_dynamic_batch_size);
-
- return HAILO_SUCCESS;
-}
-
-Expected<Buffer> InterContextBuffer::read()
-{
- const auto size = m_transfer_size * m_dynamic_batch_size;
- assert(size <= m_buffer->size());
-
- auto res = Buffer::create(size);
- CHECK_EXPECTED(res);
-
- auto status = m_buffer->read(res->data(), size, 0);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return res.release();
-}
-
-CONTROL_PROTOCOL__host_buffer_info_t InterContextBuffer::get_host_buffer_info() const
-{
- return m_buffer->get_host_buffer_info(m_transfer_size);
-}
-
-InterContextBuffer::InterContextBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size,
- uint16_t batch_size) :
- m_buffer(std::move(buffer)),
- m_transfer_size(transfer_size),
- m_max_batch_size(batch_size),
- m_dynamic_batch_size(batch_size)
-{}
-
-hailo_status InterContextBuffer::set_dynamic_batch_size(uint16_t batch_size)
-{
- if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == batch_size) {
- LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == batch_size; "
- "Leaving previously set value of {}", m_dynamic_batch_size);
- } else {
- CHECK(batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
- "batch_size ({}) must be <= than m_max_batch_size ({})",
- batch_size, m_max_batch_size);
-
- LOGGER__TRACE("Setting intermediate buffer's batch_size to {}", batch_size);
- m_dynamic_batch_size = batch_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<vdma::VdmaBuffer>> InterContextBuffer::create_sg_buffer(HailoRTDriver &driver,
- uint32_t transfer_size, uint16_t batch_size)
-{
- auto desc_sizes_pair = VdmaDescriptorList::get_desc_buffer_sizes_for_single_transfer(driver,
- batch_size, batch_size, transfer_size);
- CHECK_EXPECTED(desc_sizes_pair);
- auto desc_page_size = desc_sizes_pair->first;
- auto descs_count = desc_sizes_pair->second;
-
- auto buffer = vdma::SgBuffer::create(driver, descs_count, desc_page_size,
- HailoRTDriver::DmaDirection::BOTH);
- CHECK_EXPECTED(buffer);
-
- auto buffer_ptr = make_unique_nothrow<vdma::SgBuffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
-}
-
-Expected<std::unique_ptr<vdma::VdmaBuffer>> InterContextBuffer::create_ccb_buffer(HailoRTDriver &driver,
- uint32_t transfer_size, uint16_t batch_size)
-{
- // The first 12 channels in D2H CCB ("regular channels") requires that the amount of descriptors will be a power
- // of 2. Altough the 4 last channels ("enhanced channels") don't have this requirements, we keep the code the same.
- auto buffer_size = vdma::ContinuousBuffer::get_buffer_size_desc_power2(transfer_size * batch_size);
- auto buffer = vdma::ContinuousBuffer::create(buffer_size, driver);
- CHECK_EXPECTED(buffer);
-
- auto buffer_ptr = make_unique_nothrow<vdma::ContinuousBuffer>(buffer.release());
- CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<vdma::VdmaBuffer>(std::move(buffer_ptr));
-}
-
-bool InterContextBuffer::should_use_ccb(HailoRTDriver &driver)
-{
- switch (driver.dma_type()) {
- case HailoRTDriver::DmaType::PCIE:
- return false;
- case HailoRTDriver::DmaType::DRAM:
- return true;
- }
-
- // Shouldn't reach here
- assert(false);
- return false;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file inter_context_buffer.hpp
- * @brief Manages inter-context buffer.
- */
-
-#ifndef _HAILO_INTER_CONTEXT_BUFFER_HPP_
-#define _HAILO_INTER_CONTEXT_BUFFER_HPP_
-
-#include "os/hailort_driver.hpp"
-#include "vdma/vdma_buffer.hpp"
-#include "hailo/expected.hpp"
-#include "hailo/buffer.hpp"
-#include "control_protocol.h"
-
-
-namespace hailort
-{
-
-class InterContextBuffer final {
-public:
- static Expected<InterContextBuffer> create(HailoRTDriver &driver, uint32_t transfer_size,
- uint16_t max_batch_size);
-
- hailo_status reprogram(uint16_t batch_size);
- Expected<Buffer> read();
-
- CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;
-
-private:
- InterContextBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size, uint16_t batch_size);
- hailo_status set_dynamic_batch_size(uint16_t batch_size);
-
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
- uint32_t transfer_size, uint16_t batch_size);
- static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_ccb_buffer(HailoRTDriver &driver,
- uint32_t transfer_size, uint16_t batch_size);
-
- static bool should_use_ccb(HailoRTDriver &driver);
-
- std::unique_ptr<vdma::VdmaBuffer> m_buffer;
- const uint32_t m_transfer_size;
- const uint16_t m_max_batch_size;
- uint16_t m_dynamic_batch_size;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_INTER_CONTEXT_BUFFER_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hef.hpp
- * @brief Hef parsing and configuration functions
- **/
-
-#ifndef _HAILO_LAYER_INFO_HPP_
-#define _HAILO_LAYER_INFO_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/hailort_common.hpp"
-#include "hailort_defaults.hpp"
-#include "control_protocol.h"
-#include "os/hailort_driver.hpp"
-
-#include <vector>
-#include <memory>
-#include <map>
-
-namespace hailort
-{
-
-#define INVALID_PAD_INDEX (UINT32_MAX)
-
-enum class LayerType
-{
- NOT_SET = 0,
- BOUNDARY = 1,
- INTER_CONTEXT = 2,
- DDR = 3,
- CFG = 4
-};
-
-struct BufferIndices {
- uint32_t index;
- uint32_t cluster_index;
-};
-
-struct ConnectedContextInfo {
- uint8_t context_index;
- uint8_t dma_engine_index;
- uint8_t stream_index;
-};
-
-struct DdrInfo {
- // total_buffers_per_frame not same as core_buffer_per frame.
- //(In DDR core buffer per frame is 1). Used to calc total host descriptors_per_frame.
- uint16_t total_buffers_per_frame;
- uint16_t min_buffered_rows;
-};
-
-
-struct LayerInfo {
- LayerType type = LayerType::NOT_SET;
- hailo_stream_direction_t direction;
- uint8_t stream_index;
- uint8_t dma_engine_index;
- std::string name;
- std::string network_name;
- uint8_t network_index;
- CONTROL_PROTOCOL__nn_stream_config_t nn_stream_config;
- uint32_t max_shmifo_size;
- uint8_t context_index;
- uint32_t pad_index = INVALID_PAD_INDEX;
-
- // Transformation and shape info
- hailo_3d_image_shape_t shape;
- hailo_3d_image_shape_t hw_shape;
- uint32_t hw_data_bytes;
- hailo_format_t format;
- hailo_quant_info_t quant_info;
- hailo_nms_info_t nms_info;
-
- // Mux info
- bool is_mux;
- std::vector<LayerInfo> predecessor;
- uint32_t height_gcd;
- std::vector<uint32_t> height_ratios;
-
- // Defused nms info
- bool is_defused_nms;
- // TODO HRT-4441 change fused_layer from vector.
- std::vector<LayerInfo> fused_nms_layer;
-
- // Simulation Info
- BufferIndices buffer_indices;
-
- // Context switch info TODO: we should use std::optional for this structures (or implement our self).
- ConnectedContextInfo connected_context_info;
- DdrInfo ddr_info;
-};
-
-// LayerIdentifier = <LayerType, layer_name, stream_index>
-using LayerIdentifier = std::tuple<LayerType, std::string, uint8_t>;
-
-inline LayerIdentifier to_layer_identifier(const LayerInfo &info)
-{
- return std::make_tuple(info.type, info.name, info.stream_index);
-}
-
-class LayerInfoUtils {
-public:
- static hailo_stream_info_t get_stream_info_from_layer_info(const LayerInfo &layer_info)
- {
- hailo_stream_info_t res = {};
- res.hw_data_bytes = layer_info.hw_data_bytes;
- res.format = layer_info.format;
- if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
- res.nms_info = layer_info.nms_info;
- res.hw_frame_size =
- HailoRTCommon::get_nms_hw_frame_size(res.nms_info);
- } else {
- res.shape.height = layer_info.shape.height;
- res.shape.width = layer_info.shape.width;
- res.shape.features = layer_info.shape.features;
- res.hw_shape.height = layer_info.hw_shape.height;
- res.hw_shape.width = layer_info.hw_shape.width;
- res.hw_shape.features = layer_info.hw_shape.features;
- res.hw_frame_size =
- res.hw_shape.height * res.hw_shape.width * res.hw_shape.features * res.hw_data_bytes;
- }
- res.direction = layer_info.direction;
- res.index = layer_info.stream_index;
- assert(layer_info.name.length() < HAILO_MAX_NAME_SIZE);
- strncpy(res.name, layer_info.name.c_str(), layer_info.name.length() + 1);
- res.quant_info = layer_info.quant_info;
- res.is_mux = layer_info.is_mux;
-
- return res;
- }
-
- static bool vstream_info_already_in_vector(const std::vector<hailo_vstream_info_t> &vec, const std::string &name)
- {
- for (const auto &info : vec) {
- if (name == info.name) {
- return true;
- }
- }
- return false;
- }
-
- static std::vector<hailo_vstream_info_t> get_vstream_infos_from_layer_info(const LayerInfo &layer_info)
- {
- std::vector<hailo_vstream_info_t> res = {};
- if (layer_info.is_mux) {
- for (auto &pred : layer_info.predecessor) {
- auto vstream_infos = get_vstream_infos_from_layer_info(pred);
- res.insert(res.end(), vstream_infos.begin(), vstream_infos.end());
- }
- } else if (layer_info.is_defused_nms) {
- for (auto &fused_nms : layer_info.fused_nms_layer) {
- // In case of fused nms layers, several LayerInfos will contain data about the same fused layer
- if (!vstream_info_already_in_vector(res, fused_nms.name)) {
- auto vstream_info = get_vstream_info_from_layer_info_impl(fused_nms);
- res.push_back(vstream_info);
- }
- }
- } else {
- auto vstream_info = get_vstream_info_from_layer_info_impl(layer_info);
- res.push_back(vstream_info);
- }
-
- return res;
- }
-
-private:
- static hailo_vstream_info_t get_vstream_info_from_layer_info_impl(const LayerInfo &layer_info)
- {
- hailo_vstream_info_t res = {};
- res.format.type = layer_info.format.type;
- res.format.flags = layer_info.format.flags;
- res.format.order = HailoRTDefaults::get_default_host_format_order(layer_info.format);
- if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
- res.nms_shape.max_bboxes_per_class = layer_info.nms_info.max_bboxes_per_class * layer_info.nms_info.chunks_per_frame;
- res.nms_shape.number_of_classes = layer_info.nms_info.number_of_classes;
- } else {
- res.shape.height = layer_info.shape.height;
- res.shape.width = layer_info.shape.width;
- res.shape.features = layer_info.shape.features;
- }
- res.direction = layer_info.direction;
- assert(layer_info.name.length() < HAILO_MAX_STREAM_NAME_SIZE);
- strncpy(res.name, layer_info.name.c_str(), layer_info.name.length() + 1);
- assert(layer_info.network_name.length() < HAILO_MAX_NETWORK_NAME_SIZE);
- strncpy(res.network_name, layer_info.network_name.c_str(), layer_info.network_name.length() + 1);
- res.quant_info = layer_info.quant_info;
-
- return res;
- }
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_LAYER_INFO_HPP_ */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/mipi_stream.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file mipi_stream.cpp
+ * @brief MIPI stream implementation.
+ *
+ * MIPI is an interface standard that defines the connection between a host processor and a
+ * peripheral module - in our case, a camera sensor. Here we define a MipiInputStream: once it is
+ * configured and opened, the camera module itself sends the input data, which is why the write
+ * functions here are not implemented.
+ **/
+
+#include "hailo/hailort.h"
+
+#include "common/utils.hpp"
+
+#include "device_common/control.hpp"
+#include "mipi/mipi_stream.hpp"
+
+
+namespace hailort
+{
+
+MipiInputStream::MipiInputStream(Device &device, const CONTROL_PROTOCOL__mipi_input_config_params_t &mipi_params,
+ EventPtr &&core_op_activated_event, const LayerInfo &layer_info, hailo_status &status) :
+ InputStreamBase(layer_info, HAILO_STREAM_INTERFACE_MIPI, std::move(core_op_activated_event), status),
+ m_device(device),
+ m_is_stream_activated(false),
+ m_mipi_input_params(mipi_params)
+{}
+
+MipiInputStream::~MipiInputStream()
+{
+ if (m_is_stream_activated) {
+ auto status = deactivate_stream();
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
+ }
+ }
+}
+
+hailo_status MipiInputStream::deactivate_stream()
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+
+ ASSERT(m_is_stream_activated);
+
+ status = Control::close_stream(m_device, m_dataflow_manager_id, true);
+ CHECK_SUCCESS(status);
+
+ m_is_stream_activated = false;
+ return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds MipiInputStream::get_timeout() const
+{
+ LOGGER__WARNING("get_timeout() in MipiInputStream is not supported!");
+ assert(false);
+ return std::chrono::milliseconds(0);
+}
+
+hailo_status MipiInputStream::abort()
+{
+ return HAILO_INVALID_OPERATION;
+}
+
+hailo_status MipiInputStream::clear_abort()
+{
+ return HAILO_INVALID_OPERATION;
+}
+
+CONTROL_PROTOCOL__mipi_input_config_params_t MipiInputStream::hailo_mipi_params_to_control_mipi_params(
+ const hailo_mipi_input_stream_params_t ¶ms)
+{
+ CONTROL_PROTOCOL__mipi_input_config_params_t control_mipi_params;
+ control_mipi_params.common_params.data_type = static_cast<uint8_t>(params.data_type);
+ control_mipi_params.common_params.pixels_per_clock = static_cast<uint8_t>(params.mipi_common_params.pixels_per_clock);
+ control_mipi_params.mipi_rx_id = params.mipi_rx_id;
+ control_mipi_params.common_params.number_of_lanes = params.mipi_common_params.number_of_lanes;
+ control_mipi_params.common_params.clock_selection = static_cast<uint8_t>(params.mipi_common_params.clock_selection);
+ control_mipi_params.common_params.data_rate = params.mipi_common_params.data_rate;
+ control_mipi_params.common_params.virtual_channel_index = params.mipi_common_params.virtual_channel_index;
+ control_mipi_params.common_params.img_width_pixels = params.mipi_common_params.img_width_pixels;
+ control_mipi_params.common_params.img_height_pixels = params.mipi_common_params.img_height_pixels;
+ control_mipi_params.isp_params.isp_enable = params.isp_enable;
+ control_mipi_params.isp_params.isp_img_in_order = static_cast<uint8_t>(params.isp_params.isp_img_in_order);
+ control_mipi_params.isp_params.isp_img_out_data_type = static_cast<uint8_t>(params.isp_params.isp_img_out_data_type);
+ control_mipi_params.isp_params.isp_crop_enable = params.isp_params.isp_crop_enable;
+ control_mipi_params.isp_params.isp_crop_output_width_pixels = params.isp_params.isp_crop_output_width_pixels;
+ control_mipi_params.isp_params.isp_crop_output_height_pixels = params.isp_params.isp_crop_output_height_pixels;
+ control_mipi_params.isp_params.isp_crop_output_width_start_offset_pixels = params.isp_params.isp_crop_output_width_start_offset_pixels;
+ control_mipi_params.isp_params.isp_crop_output_height_start_offset_pixels = params.isp_params.isp_crop_output_height_start_offset_pixels;
+ control_mipi_params.isp_params.isp_test_pattern_enable = params.isp_params.isp_test_pattern_enable;
+ control_mipi_params.isp_params.isp_configuration_bypass = params.isp_params.isp_configuration_bypass;
+ control_mipi_params.isp_params.isp_run_time_ae_enable = params.isp_params.isp_run_time_ae_enable;
+ control_mipi_params.isp_params.isp_run_time_awb_enable = params.isp_params.isp_run_time_awb_enable;
+ control_mipi_params.isp_params.isp_run_time_adt_enable = params.isp_params.isp_run_time_adt_enable;
+ control_mipi_params.isp_params.isp_run_time_af_enable = params.isp_params.isp_run_time_af_enable;
+ control_mipi_params.isp_params.isp_run_time_calculations_interval_ms = params.isp_params.isp_run_time_calculations_interval_ms;
+ control_mipi_params.isp_params.isp_light_frequency = static_cast<uint8_t>(params.isp_params.isp_light_frequency);
+ return control_mipi_params;
+}
+
+// Note: Mipi streams don't work with dynamic batch sizes
+hailo_status MipiInputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ CONTROL_PROTOCOL__config_stream_params_t params = {};
+
+ params.nn_stream_config = m_nn_stream_config;
+ params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_MIPI;
+ params.is_input = true;
+ params.stream_index = m_stream_info.index;
+ params.skip_nn_stream_config = false;
+    // Currently a hardcoded assignment, as there are no power-mode optimizations over MIPI
+ params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
+ params.communication_params.mipi_input = m_mipi_input_params;
+
+ status = Control::config_stream_mipi_input(m_device, ¶ms, m_dataflow_manager_id);
+ CHECK_SUCCESS(status);
+
+ status = Control::open_stream(m_device, m_dataflow_manager_id, true);
+ CHECK_SUCCESS(status);
+
+ m_is_stream_activated = true;
+ return HAILO_SUCCESS;
+}
+
+Expected<size_t> MipiInputStream::sync_write_raw_buffer(const MemoryView &buffer)
+{
+ (void)buffer;
+ return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+hailo_status MipiInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
+{
+ (void)buffer;
+ (void)offset;
+ (void)size;
+ return HAILO_INVALID_OPERATION;
+}
+
+Expected<std::unique_ptr<MipiInputStream>> MipiInputStream::create(Device &device,
+ const LayerInfo &edge_layer, const hailo_mipi_input_stream_params_t ¶ms,
+ EventPtr core_op_activated_event)
+{
+ auto mipi_params = MipiInputStream::hailo_mipi_params_to_control_mipi_params(params);
+ auto status = HAILO_UNINITIALIZED;
+ std::unique_ptr<MipiInputStream> stream(new (std::nothrow) MipiInputStream(device, mipi_params,
+ std::move(core_op_activated_event), edge_layer, status));
+ CHECK_AS_EXPECTED(stream != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return stream;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file mipi_stream.hpp
+ * @brief MIPI stream definition.
+ *
+ * Defines MipiInputStream, which enables running inference on data arriving from a MIPI interface/sensor.
+ **/
+
+#ifndef HAILO_MIPI_STREAM_H_
+#define HAILO_MIPI_STREAM_H_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/event.hpp"
+
+#include "stream_common/stream_internal.hpp"
+
+
+namespace hailort
+{
+
+class MipiInputStream : public InputStreamBase {
+private:
+ MipiInputStream(Device &device, const CONTROL_PROTOCOL__mipi_input_config_params_t &mipi_params,
+ EventPtr &&core_op_activated_event, const LayerInfo &layer_info, hailo_status &status);
+
+ static CONTROL_PROTOCOL__mipi_input_config_params_t hailo_mipi_params_to_control_mipi_params(
+ const hailo_mipi_input_stream_params_t ¶ms);
+
+ Device &m_device;
+ bool m_is_stream_activated;
+ CONTROL_PROTOCOL__mipi_input_config_params_t m_mipi_input_params;
+
+protected:
+ virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
+ virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+ virtual hailo_status set_timeout(std::chrono::milliseconds timeout) { (void)timeout; return HAILO_INVALID_OPERATION; };
+
+public:
+ static Expected<std::unique_ptr<MipiInputStream>> create(Device &device,
+ const LayerInfo &edge_layer, const hailo_mipi_input_stream_params_t ¶ms,
+ EventPtr core_op_activated_event);
+ virtual ~MipiInputStream();
+
+ virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+ virtual hailo_status deactivate_stream() override;
+ virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_MIPI; }
+ virtual std::chrono::milliseconds get_timeout() const override;
+ virtual hailo_status abort() override;
+ virtual hailo_status clear_abort() override;
+
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_MIPI_STREAM_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file mipi_stream.cpp
- * @brief MIPI stream implementation.
- *
- * MIPI is an interface which defines the connection between a host processor and a display
- * module, which in our case is a camera. Here we define a MipiInputStream which when it is configured
- * and opened, the camera module should send input data, meaning that the write functions here are not
- * implemented.
- **/
-
-#include <hailo/hailort.h>
-#include "common/utils.hpp"
-#include <control.hpp>
-#include "mipi_stream.hpp"
-
-namespace hailort
-{
-
-MipiInputStream::MipiInputStream(Device &device, const CONTROL_PROTOCOL__mipi_input_config_params_t &mipi_params,
- EventPtr &&network_group_activated_event, const LayerInfo &layer_info, hailo_status &status) :
- InputStreamBase(layer_info, HAILO_STREAM_INTERFACE_MIPI, std::move(network_group_activated_event), status),
- m_device(device),
- m_is_stream_activated(false),
- m_mipi_input_params(mipi_params)
-{}
-
-MipiInputStream::~MipiInputStream()
-{
- if (m_is_stream_activated) {
- auto status = deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Close stream failed! (status {} stream index {})", status, m_stream_info.index);
- }
- }
-}
-
-hailo_status MipiInputStream::deactivate_stream()
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- ASSERT(m_is_stream_activated);
-
- status = Control::close_stream(m_device, m_dataflow_manager_id, true);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = false;
- return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds MipiInputStream::get_timeout() const
-{
- LOGGER__WARNING("get_timeout() in MipiInputStream is not supported!");
- assert(false);
- return std::chrono::milliseconds(0);
-}
-
-hailo_status MipiInputStream::abort()
-{
- return HAILO_INVALID_OPERATION;
-}
-
-hailo_status MipiInputStream::clear_abort()
-{
- return HAILO_INVALID_OPERATION;
-}
-
-CONTROL_PROTOCOL__mipi_input_config_params_t MipiInputStream::hailo_mipi_params_to_control_mipi_params(
- const hailo_mipi_input_stream_params_t ¶ms)
-{
- CONTROL_PROTOCOL__mipi_input_config_params_t control_mipi_params;
- control_mipi_params.common_params.data_type = static_cast<uint8_t>(params.data_type);
- control_mipi_params.common_params.pixels_per_clock = static_cast<uint8_t>(params.mipi_common_params.pixels_per_clock);
- control_mipi_params.mipi_rx_id = params.mipi_rx_id;
- control_mipi_params.common_params.number_of_lanes = params.mipi_common_params.number_of_lanes;
- control_mipi_params.common_params.clock_selection = static_cast<uint8_t>(params.mipi_common_params.clock_selection);
- control_mipi_params.common_params.data_rate = params.mipi_common_params.data_rate;
- control_mipi_params.common_params.virtual_channel_index = params.mipi_common_params.virtual_channel_index;
- control_mipi_params.common_params.img_width_pixels = params.mipi_common_params.img_width_pixels;
- control_mipi_params.common_params.img_height_pixels = params.mipi_common_params.img_height_pixels;
- control_mipi_params.isp_params.isp_enable = params.isp_enable;
- control_mipi_params.isp_params.isp_img_in_order = static_cast<uint8_t>(params.isp_params.isp_img_in_order);
- control_mipi_params.isp_params.isp_img_out_data_type = static_cast<uint8_t>(params.isp_params.isp_img_out_data_type);
- control_mipi_params.isp_params.isp_crop_enable = params.isp_params.isp_crop_enable;
- control_mipi_params.isp_params.isp_crop_output_width_pixels = params.isp_params.isp_crop_output_width_pixels;
- control_mipi_params.isp_params.isp_crop_output_height_pixels = params.isp_params.isp_crop_output_height_pixels;
- control_mipi_params.isp_params.isp_crop_output_width_start_offset_pixels = params.isp_params.isp_crop_output_width_start_offset_pixels;
- control_mipi_params.isp_params.isp_crop_output_height_start_offset_pixels = params.isp_params.isp_crop_output_height_start_offset_pixels;
- control_mipi_params.isp_params.isp_test_pattern_enable = params.isp_params.isp_test_pattern_enable;
- control_mipi_params.isp_params.isp_configuration_bypass = params.isp_params.isp_configuration_bypass;
- control_mipi_params.isp_params.isp_run_time_ae_enable = params.isp_params.isp_run_time_ae_enable;
- control_mipi_params.isp_params.isp_run_time_awb_enable = params.isp_params.isp_run_time_awb_enable;
- control_mipi_params.isp_params.isp_run_time_adt_enable = params.isp_params.isp_run_time_adt_enable;
- control_mipi_params.isp_params.isp_run_time_af_enable = params.isp_params.isp_run_time_af_enable;
- control_mipi_params.isp_params.isp_run_time_calculations_interval_ms = params.isp_params.isp_run_time_calculations_interval_ms;
- control_mipi_params.isp_params.isp_light_frequency = static_cast<uint8_t>(params.isp_params.isp_light_frequency);
- return control_mipi_params;
-}
-
-// Note: Mipi streams don't work with dynamic batch sizes
-hailo_status MipiInputStream::activate_stream(uint16_t /* dynamic_batch_size */)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- CONTROL_PROTOCOL__config_stream_params_t params = {};
-
- params.nn_stream_config = m_nn_stream_config;
- params.communication_type = CONTROL_PROTOCOL__COMMUNICATION_TYPE_MIPI;
- params.is_input = true;
- params.stream_index = m_stream_info.index;
- params.skip_nn_stream_config = false;
- // Currently hardcoded assign as there are no power mode optimizations over mipi
- params.power_mode = static_cast<uint8_t>(CONTROL_PROTOCOL__MODE_ULTRA_PERFORMANCE);
- params.communication_params.mipi_input = m_mipi_input_params;
-
- status = Control::config_stream_mipi_input(m_device, ¶ms, m_dataflow_manager_id);
- CHECK_SUCCESS(status);
-
- status = Control::open_stream(m_device, m_dataflow_manager_id, true);
- CHECK_SUCCESS(status);
-
- m_is_stream_activated = true;
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> MipiInputStream::sync_write_raw_buffer(const MemoryView &buffer)
-{
- (void)buffer;
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-hailo_status MipiInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
-{
- (void)buffer;
- (void)offset;
- (void)size;
- return HAILO_INVALID_OPERATION;
-}
-
-Expected<std::unique_ptr<MipiInputStream>> MipiInputStream::create(Device &device,
- const LayerInfo &edge_layer, const hailo_mipi_input_stream_params_t ¶ms,
- EventPtr network_group_activated_event)
-{
- auto mipi_params = MipiInputStream::hailo_mipi_params_to_control_mipi_params(params);
- auto status = HAILO_UNINITIALIZED;
- std::unique_ptr<MipiInputStream> stream(new (std::nothrow) MipiInputStream(device, mipi_params,
- std::move(network_group_activated_event), edge_layer, status));
- CHECK_AS_EXPECTED(stream != nullptr, HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return stream;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file mipi_stream.hpp
- * @brief MIPI stream definition.
- *
- * MipiInputStream is defined which will give the option to infer with data from a MIPI interface/sensor.
- **/
-
-#ifndef HAILO_MIPI_STREAM_H_
-#define HAILO_MIPI_STREAM_H_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/event.hpp"
-
-namespace hailort
-{
-
-class MipiInputStream : public InputStreamBase {
-private:
- MipiInputStream(Device &device, const CONTROL_PROTOCOL__mipi_input_config_params_t &mipi_params,
- EventPtr &&network_group_activated_event, const LayerInfo &layer_info, hailo_status &status);
-
- static CONTROL_PROTOCOL__mipi_input_config_params_t hailo_mipi_params_to_control_mipi_params(
- const hailo_mipi_input_stream_params_t ¶ms);
-
- Device &m_device;
- bool m_is_stream_activated;
- CONTROL_PROTOCOL__mipi_input_config_params_t m_mipi_input_params;
-
-protected:
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
- virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) { (void)timeout; return HAILO_INVALID_OPERATION; };
-
-public:
- static Expected<std::unique_ptr<MipiInputStream>> create(Device &device,
- const LayerInfo &edge_layer, const hailo_mipi_input_stream_params_t ¶ms,
- EventPtr network_group_activated_event);
-
- MipiInputStream(MipiInputStream&& other) :
- InputStreamBase(std::move(other)),
- m_device(other.m_device),
- m_is_stream_activated(std::exchange(other.m_is_stream_activated, false)),
- m_mipi_input_params(std::move(other.m_mipi_input_params))
- {}
-
- virtual ~MipiInputStream();
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_MIPI; }
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
-
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_MIPI_STREAM_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file multi_device_scheduled_stream.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "multi_device_scheduled_stream.hpp"
-
-namespace hailort
-{
-
-hailo_status MultiDeviceScheduledInputStream::send_pending_buffer(size_t device_index)
-{
- auto buffer = dequeue();
- CHECK_EXPECTED_AS_STATUS(buffer);
- auto status = m_streams[device_index].get().write_buffer_only(buffer.value());
- CHECK_SUCCESS(status);
-
- VdmaInputStream &vdma_input = static_cast<VdmaInputStream&>(m_streams[device_index].get());
- return vdma_input.send_pending_buffer();
-}
-
-Expected<size_t> MultiDeviceScheduledInputStream::sync_write_raw_buffer(const MemoryView &buffer,
- const std::function<bool()> &should_cancel)
-{
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK_AS_EXPECTED(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto status = network_group_scheduler->wait_for_write(m_network_group_handle, name(), get_timeout(), should_cancel);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Write to stream was aborted.");
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = enqueue(buffer);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Enqueue was aborted.");
- network_group_scheduler->mark_failed_write(m_network_group_handle, name());
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = network_group_scheduler->signal_write_finish(m_network_group_handle, name());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return buffer.size();
-}
-
-Expected<size_t> MultiDeviceScheduledInputStream::get_pending_frames_count() const
-{
- return get_queue_size();
-}
-
-hailo_status MultiDeviceScheduledInputStream::enqueue(const MemoryView &buffer)
-{
- return m_queue->enqueue(buffer, get_timeout());
-}
-
-Expected<MemoryView> MultiDeviceScheduledInputStream::dequeue()
-{
- return m_queue->dequeue(get_timeout());
-}
-
-size_t MultiDeviceScheduledInputStream::get_queue_size() const
-{
- return m_queue->size();
-}
-
-hailo_status MultiDeviceScheduledInputStream::abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto abort_status = stream.get().abort();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = abort_status;
- }
- }
- m_queue->abort();
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto disable_status = network_group_scheduler->disable_stream(m_network_group_handle, name());
- if (HAILO_SUCCESS != disable_status) {
- LOGGER__ERROR("Failed to disable stream in the network group scheduler. (status: {})", disable_status);
- status = disable_status;
- }
-
- return status;
-}
-
-hailo_status MultiDeviceScheduledInputStream::clear_abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto clear_abort_status = stream.get().clear_abort();
- if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
- LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
- status = clear_abort_status;
- }
- }
- m_queue->clear_abort();
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto enable_status = network_group_scheduler->enable_stream(m_network_group_handle, name());
- if (HAILO_SUCCESS != enable_status) {
- LOGGER__ERROR("Failed to enable stream in the network group scheduler. (status: {})", enable_status);
- status = enable_status;
- }
-
- return status;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file multi_device_stream.hpp
- * @brief Internal multi device stream implementation for scheduled streams
- *
- **/
-
-#ifndef HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
-#define HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "vdevice_internal.hpp"
-#include "vdma_device.hpp"
-#include "scheduled_stream.hpp"
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-
-class BuffersQueue
-{
-public:
- static Expected<std::unique_ptr<BuffersQueue>> create_unique(size_t buffer_size, size_t buffers_count)
- {
- std::vector<Buffer> queue;
- queue.reserve(buffers_count);
- for (size_t i = 0; i < (buffers_count); i++) {
- auto buff = Buffer::create(buffer_size);
- CHECK_EXPECTED(buff);
- queue.emplace_back(buff.release());
- }
-
- auto ptr = make_unique_nothrow<BuffersQueue>(std::move(queue));
- CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
- return ptr;
- }
-
- hailo_status enqueue(const MemoryView &buff, const std::chrono::milliseconds &timeout)
- {
- auto status = HAILO_SUCCESS;
- {
- std::unique_lock<std::mutex> lock(m_mutex);
-
- // TODO: this validation is done in scheduler logic. can be removed?
- auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
- if (m_should_stop) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true;
- }
- return size() < m_queue.size();
- });
- CHECK(wait_res, HAILO_TIMEOUT, "Failed to enqueue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("'enqueue' was aborted by user");
- return status;
- }
-
- std::memcpy(m_queue[m_head].data(), buff.data(), buff.size());
- m_head = static_cast<uint32_t>((m_head + 1) % m_queue.size());
- m_is_empty = false;
- }
- m_cv.notify_all();
-
- return HAILO_SUCCESS;
- }
-
- Expected<MemoryView> dequeue(const std::chrono::milliseconds &timeout)
- {
- auto status = HAILO_SUCCESS;
- size_t last_tail = 0;
- {
- std::unique_lock<std::mutex> lock(m_mutex);
-
- // TODO: this validation is done in scheduler logic. can be removed?
- auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
- if (m_should_stop) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true;
- }
- return 0 < size();
- });
- CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "Failed to dequeue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("'dequeue' was aborted by user");
- return make_unexpected(status);
- }
-
- last_tail = m_tail;
- m_tail = static_cast<uint32_t>((m_tail + 1) % m_queue.size());
- if (m_tail == m_head) {
- m_is_empty = true;
- }
- }
- m_cv.notify_all();
-
- return MemoryView(m_queue[last_tail]);
- }
-
- size_t size()
- {
- if (m_head == m_tail) {
- return m_is_empty ? 0 : m_queue.size();
- } else if (m_head > m_tail) {
- return (m_head - m_tail);
- } else {
- return (m_queue.size() - m_tail) + m_head;
- }
- }
-
- void abort()
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_should_stop = true;
- m_cv.notify_all();
- }
-
- void clear_abort()
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_should_stop = false;
- m_cv.notify_all();
- }
-
- BuffersQueue(std::vector<Buffer> &&queue) : m_queue(std::move(queue)), m_head(0), m_tail(0), m_is_empty(true), m_should_stop(false)
- {}
-
-private:
- std::vector<Buffer> m_queue;
- std::atomic_uint32_t m_head;
- std::atomic_uint32_t m_tail;
-
- std::atomic_bool m_is_empty;
-
- std::condition_variable m_cv;
- std::mutex m_mutex;
- std::atomic_bool m_should_stop;
-};
-
-class MultiDeviceScheduledInputStream : public ScheduledInputStream {
-public:
- MultiDeviceScheduledInputStream(MultiDeviceScheduledInputStream &&other) :
- ScheduledInputStream(std::move(other)),
- m_queue(std::move(other.m_queue))
- {}
-
- explicit MultiDeviceScheduledInputStream(
- std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
- const scheduler_ng_handle_t &network_group_handle,
- EventPtr &&network_group_activated_event,
- const LayerInfo &layer_info,
- NetworkGroupSchedulerWeakPtr network_group_scheduler,
- std::unique_ptr<BuffersQueue> &&frames_queue,
- hailo_status &status) :
- ScheduledInputStream(std::move(streams), network_group_handle,
- std::move(network_group_activated_event), layer_info, network_group_scheduler, status),
- m_queue(std::move(frames_queue))
- {
- }
-
- virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
- virtual Expected<size_t> get_pending_frames_count() const override;
-
-protected:
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
- const std::function<bool()> &should_cancel = []() { return false; }) override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
-
-private:
- hailo_status enqueue(const MemoryView &buffer);
- Expected<MemoryView> dequeue();
- size_t get_queue_size() const;
-
- std::unique_ptr<BuffersQueue> m_queue;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_ */
-cmake_minimum_required(VERSION 3.0.0)
\ No newline at end of file
+cmake_minimum_required(VERSION 3.0.0)
+
+set(HAILORT_OPS_CPP_SOURCES
+ ${CMAKE_CURRENT_SOURCE_DIR}/ops/nms_post_process.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/ops/yolo_post_process.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/ops/ssd_post_process.cpp
+)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/pipeline.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/inference_pipeline.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/vstream.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} ${HAILORT_OPS_CPP_SOURCES} PARENT_SCOPE)
+set(HAILORT_OPS_CPP_SOURCES ${HAILORT_OPS_CPP_SOURCES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file nms_post_process.cpp
+ * @brief NMS post process
+ *
+ * Reference code: https://github.com/winfredsu/ssd_postprocessing/blob/master/ssd_postprocessing.py
+ **/
+
+#include "net_flow/ops/nms_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+ float NmsPostProcessOp::compute_iou(const hailo_bbox_float32_t &box_1, const hailo_bbox_float32_t &box_2)
+ {
+ const float overlap_area_width = std::min(box_1.x_max, box_2.x_max) - std::max(box_1.x_min, box_2.x_min);
+ const float overlap_area_height = std::min(box_1.y_max, box_2.y_max) - std::max(box_1.y_min, box_2.y_min);
+ if (overlap_area_width <= 0.0f || overlap_area_height <= 0.0f) {
+ return 0.0f;
+ }
+ const float intersection = overlap_area_width * overlap_area_height;
+ const float box_1_area = (box_1.y_max - box_1.y_min) * (box_1.x_max - box_1.x_min);
+ const float box_2_area = (box_2.y_max - box_2.y_min) * (box_2.x_max - box_2.x_min);
+ const float union_area = (box_1_area + box_2_area - intersection);
+
+ return (intersection / union_area);
+ }
+
+ void NmsPostProcessOp::remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+ {
+ std::sort(detections.begin(), detections.end(),
+ [](DetectionBbox a, DetectionBbox b)
+ { return a.m_bbox.score > b.m_bbox.score; });
+
+ for (size_t i = 0; i < detections.size(); i++) {
+ if (detections[i].m_bbox.score == REMOVED_CLASS_SCORE) {
+ // Detection overlapped with a higher score detection
+ continue;
+ }
+
+ for (size_t j = i + 1; j < detections.size(); j++) {
+ if (detections[j].m_bbox.score == REMOVED_CLASS_SCORE) {
+ // Detection overlapped with a higher score detection
+ continue;
+ }
+
+ if (detections[i].m_class_id == detections[j].m_class_id
+ && (compute_iou(detections[i].m_bbox, detections[j].m_bbox) >= m_nms_config.nms_iou_th)) {
+ // Remove detections[j] if the iou is higher then the threshold
+ detections[j].m_bbox.score = REMOVED_CLASS_SCORE;
+ assert(detections[i].m_class_id < classes_detections_count.size());
+ assert(classes_detections_count[detections[j].m_class_id] > 0);
+ classes_detections_count[detections[j].m_class_id]--;
+ }
+ }
+ }
+ }
+
+ void NmsPostProcessOp::fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
+ std::vector<uint32_t> &classes_detections_count)
+ {
+ // Calculate the number of detections before each class, to help us later calculate the buffer_offset for it's detections.
+ std::vector<uint32_t> num_of_detections_before;
+ num_of_detections_before.reserve(m_nms_config.classes);
+ uint32_t ignored_detections_count = 0;
+ for (size_t class_idx = 0; class_idx < m_nms_config.classes; class_idx++) {
+ if (classes_detections_count[class_idx] > m_nms_config.max_proposals_per_class) {
+ ignored_detections_count += (classes_detections_count[class_idx] - m_nms_config.max_proposals_per_class);
+ classes_detections_count[class_idx] = m_nms_config.max_proposals_per_class;
+ }
+
+ if (0 == class_idx) {
+ num_of_detections_before[class_idx] = 0;
+ }
+ else {
+ num_of_detections_before[class_idx] = num_of_detections_before[class_idx - 1] + classes_detections_count[class_idx - 1];
+ }
+
+ // Fill `bbox_count` value for class_idx in the result buffer
+ float32_t bbox_count_casted = static_cast<float32_t>(classes_detections_count[class_idx]);
+ auto buffer_offset = (class_idx * sizeof(bbox_count_casted)) + (num_of_detections_before[class_idx] * sizeof(hailo_bbox_float32_t));
+ memcpy((buffer.data() + buffer_offset), &bbox_count_casted, sizeof(bbox_count_casted));
+ }
+
+ for (auto &detection : detections) {
+ if (REMOVED_CLASS_SCORE == detection.m_bbox.score) {
+ // Detection overlapped with a higher score detection and removed in remove_overlapping_boxes()
+ continue;
+ }
+ if (0 == classes_detections_count[detection.m_class_id]) {
+ // This class' detections count is higher then m_nms_config.max_proposals_per_class.
+ // This detection is ignored due to having lower score (detections vector is sorted by score).
+ continue;
+ }
+
+ auto buffer_offset = ((detection.m_class_id + 1) * sizeof(float32_t))
+ + (num_of_detections_before[detection.m_class_id] * sizeof(hailo_bbox_float32_t));
+
+ assert((buffer_offset + sizeof(hailo_bbox_float32_t)) <= buffer.size());
+ memcpy((hailo_bbox_float32_t*)(buffer.data() + buffer_offset), &detection.m_bbox, sizeof(hailo_bbox_float32_t));
+ num_of_detections_before[detection.m_class_id]++;
+ classes_detections_count[detection.m_class_id]--;
+ }
+
+ if (0 != ignored_detections_count) {
+ LOGGER__INFO("{} Detections were ignored, due to `max_bboxes_per_class` defined as {}.",
+ ignored_detections_count, m_nms_config.max_proposals_per_class);
+ }
+ }
+
+ hailo_status NmsPostProcessOp::hailo_nms_format(std::vector<DetectionBbox> &&detections,
+ MemoryView dst_view, std::vector<uint32_t> &classes_detections_count)
+ {
+ remove_overlapping_boxes(detections, classes_detections_count);
+ fill_nms_format_buffer(dst_view, detections, classes_detections_count);
+ return HAILO_SUCCESS;
+ }
+
+ std::string NmsPostProcessOp::get_nms_config_description()
+ {
+ auto config_info = fmt::format("Score threshold: {:.3f}, Iou threshold: {:.2f}, Classes: {}, Cross classes: {}",
+ m_nms_config.nms_score_th, m_nms_config.nms_iou_th, m_nms_config.classes, m_nms_config.cross_classes);
+ if (m_nms_config.background_removal) {
+ config_info += fmt::format(", Background removal index: {}", m_nms_config.background_removal_index);
+ }
+ return config_info;
+ }
+}
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file op.hpp
+ * @brief Net-Flow op
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-process.
+ **/
+
+#ifndef _HAILO_NET_FLOW_NMS_POST_PROCESS_HPP_
+#define _HAILO_NET_FLOW_NMS_POST_PROCESS_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/quantization.hpp"
+#include "hailo/buffer.hpp"
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+
+#include "net_flow/ops/op.hpp"
+
+
+namespace hailort
+{
+namespace net_flow
+{
+
+#define INVALID_BBOX_DIM (std::numeric_limits<float32_t>::max())
+#define INVALID_NMS_DETECTION (std::numeric_limits<uint32_t>::max())
+#define INVALID_NMS_SCORE (std::numeric_limits<float32_t>::max())
+
+struct DetectionBbox
+{
+ DetectionBbox(float32_t x_min, float32_t y_min, float32_t width, float32_t height, float32_t score, uint32_t class_id)
+ : m_class_id(class_id), m_bbox{y_min, x_min, (y_min + height), (x_min + width), score} {}
+
+ DetectionBbox(const hailo_bbox_float32_t &bbox, uint32_t class_id)
+ : m_class_id(class_id), m_bbox(bbox) {}
+
+ DetectionBbox() : DetectionBbox(hailo_bbox_float32_t{
+ INVALID_BBOX_DIM,
+ INVALID_BBOX_DIM,
+ INVALID_BBOX_DIM,
+ INVALID_BBOX_DIM,
+ INVALID_BBOX_DIM
+ }, INVALID_NMS_DETECTION) {}
+
+ uint32_t m_class_id;
+ hailo_bbox_float32_t m_bbox;
+};
+
+struct NmsPostProcessConfig
+{
+ // User given confidence threshold for a bbox. A bbox will be consider as detection if the
+ // (objectness * class_score) is higher then the confidence_threshold.
+ double nms_score_th = 0;
+
+ // User given IOU threshold (intersection over union). This threshold is for performing
+ // Non-maximum suppression (Removing overlapping boxes).
+ double nms_iou_th = 0;
+
+ // Maximum amount of bboxes per nms class.
+ uint32_t max_proposals_per_class = 0;
+
+ // The model's number of classes. (This depends on the dataset that the model trained on).
+ uint32_t classes = 0;
+
+ // Toggle background class removal from results
+ bool background_removal = false;
+
+ // Index of background class for background removal
+ uint32_t background_removal_index = 0;
+
+ // Indicates whether or not NMS performs IOU over different classes for the same box.
+ // If set to false - NMS won't intersect different classes, and a box could have multiple labels.
+ bool cross_classes = false;
+};
+
+static const float32_t REMOVED_CLASS_SCORE = 0.0f;
+
+class NmsPostProcessOp : public Op
+{
+public:
+ virtual ~NmsPostProcessOp() = default;
+
+ /**
+ * Computes the IOU ratio of @a box_1 and @a box_2
+ */
+ static float compute_iou(const hailo_bbox_float32_t &box_1, const hailo_bbox_float32_t &box_2);
+
+protected:
+ NmsPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
+ const std::map<std::string, BufferMetaData> &outputs_metadata,
+ const NmsPostProcessConfig &nms_post_process_config,
+ const std::string &name)
+ : Op(inputs_metadata, outputs_metadata, name)
+ , m_nms_config(nms_post_process_config)
+ {}
+
+ NmsPostProcessConfig m_nms_config;
+
+ template<typename HostType = float32_t, typename DeviceType>
+ std::pair<uint32_t, float32_t> get_max_class(const DeviceType *data, uint32_t entry_idx, uint32_t classes_start_index,
+ float32_t objectness, hailo_quant_info_t quant_info, uint32_t width)
+ {
+ std::pair<uint32_t, float32_t> max_id_score_pair;
+ for (uint32_t class_index = 0; class_index < m_nms_config.classes; class_index++) {
+ auto class_id = class_index;
+ if (m_nms_config.background_removal) {
+ if (m_nms_config.background_removal_index == class_index) {
+ // Ignore if class_index is background_removal_index
+ continue;
+ }
+ else if (0 == m_nms_config.background_removal_index) {
+ // background_removal_index will always be the first or last index.
+ // If it is the first one we need to reduce all classes id's in 1.
+ // If it is the last one we just ignore it in the previous if case.
+ class_id--;
+ }
+ }
+
+ auto class_entry_idx = entry_idx + ((classes_start_index + class_index) * width);
+ auto class_confidence = Quantization::dequantize_output<HostType, DeviceType>(data[class_entry_idx], quant_info);
+ auto class_score = class_confidence * objectness;
+ if (class_score > max_id_score_pair.second) {
+ max_id_score_pair.first = class_id;
+ max_id_score_pair.second = class_score;
+ }
+ }
+ return max_id_score_pair;
+ }
+
+ /**
+ * Removes overlapping boxes in @a detections by setting the class confidence to zero.
+ *
+ * @param[in] detections A vector of @a DetectionBbox containing the detections boxes after ::extract_detections() function.
+ *
+ */
+ void remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count);
+
+ /*
+ * For each class the layout is
+ * \code
+ * struct (packed) {
+ * uint16_t/float32_t bbox_count;
+ * hailo_bbox_t/hailo_bbox_float32_t bbox[bbox_count];
+ * };
+ * \endcode
+ */
+ void fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
+ std::vector<uint32_t> &classes_detections_count);
+
+ hailo_status hailo_nms_format(std::vector<DetectionBbox> &&detections,
+ MemoryView dst_view, std::vector<uint32_t> &classes_detections_count);
+
+ std::string get_nms_config_description();
+
+};
+
+}
+}
+
+#endif // _HAILO_NET_FLOW_NMS_POST_PROCESS_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file op.hpp
+ * @brief Net-Flow op
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-process.
+ **/
+
+#ifndef _HAILO_NET_FLOW_OP_HPP_
+#define _HAILO_NET_FLOW_OP_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/buffer.hpp"
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+
+
+namespace hailort
+{
+namespace net_flow
+{
+
+struct BufferMetaData
+{
+ hailo_3d_image_shape_t shape;
+ hailo_3d_image_shape_t padded_shape;
+ hailo_format_t format;
+ hailo_quant_info_t quant_info;
+};
+
+
+class Op
+{
+public:
+ virtual ~Op() = default;
+
+ /**
+ * Executes operation on inferred data.
+ *
+ * @param[in] inputs A map between input names to input buffers.
+ * @param[in] outputs A map between outputs names and their pre-allocated buffers.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ *
+ */
+ virtual hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) = 0;
+
+ const std::map<std::string, BufferMetaData> &inputs_metadata() const
+ {
+ return m_inputs_metadata;
+ }
+
+ const std::map<std::string, BufferMetaData> &outputs_metadata() const
+ {
+ return m_outputs_metadata;
+ }
+
+ std::string get_name() {
+ return m_name;
+ }
+
+ virtual std::string get_op_description() = 0;
+
+protected:
+ Op(const std::map<std::string, BufferMetaData> &inputs_metadata,
+ const std::map<std::string, BufferMetaData> &outputs_metadata,
+ const std::string &name)
+ : m_inputs_metadata(inputs_metadata)
+ , m_outputs_metadata(outputs_metadata)
+ , m_name(name)
+ {}
+
+ std::map<std::string, BufferMetaData> m_inputs_metadata;
+ std::map<std::string, BufferMetaData> m_outputs_metadata;
+ const std::string m_name;
+};
+
+}
+}
+
+#endif // _HAILO_NET_FLOW_OP_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file ssd_post_process.cpp
+ * @brief SSD post process
+ *
+ * Reference code: https://github.com/winfredsu/ssd_postprocessing/blob/master/ssd_postprocessing.py
+ **/
+
+#include "net_flow/ops/ssd_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+Expected<std::shared_ptr<Op>> SSDPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+ const std::map<std::string, BufferMetaData> &outputs_metadata,
+ const NmsPostProcessConfig &nms_post_process_config,
+ const SSDPostProcessConfig &ssd_post_process_config)
+{
+ for (auto &name_to_inputs_metadata : inputs_metadata) {
+ CHECK_AS_EXPECTED(name_to_inputs_metadata.second.format.order == HAILO_FORMAT_ORDER_NHCW, HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: Unexpected input format {}", name_to_inputs_metadata.second.format.order);
+ }
+
+ // Validate each anchor is mapped by reg and cls inputs
+ for (const auto ®_to_cls_name : ssd_post_process_config.reg_to_cls_inputs) {
+ CHECK_AS_EXPECTED(ssd_post_process_config.anchors.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: anchors does not contain reg layer {}", reg_to_cls_name.first);
+ CHECK_AS_EXPECTED(ssd_post_process_config.anchors.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: anchors does not contain cls layer {}", reg_to_cls_name.second);
+ const auto ®_anchors = ssd_post_process_config.anchors.at(reg_to_cls_name.first);
+ const auto &cls_anchors = ssd_post_process_config.anchors.at(reg_to_cls_name.second);
+ CHECK_AS_EXPECTED(reg_anchors.size() == cls_anchors.size(), HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: reg and cls layers have different number of anchors. reg: #{}, cls: #{}",
+ reg_anchors.size(), cls_anchors.size());
+ for (size_t i = 0; i < reg_anchors.size(); ++i) {
+ auto reg_anchor = reg_anchors[i];
+ auto cls_anchor = cls_anchors[i];
+ CHECK_AS_EXPECTED(reg_anchor == cls_anchor, HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: reg and cls layers have differenet anchors. reg: {}, cls: {}",
+ reg_anchor, cls_anchor);
+ }
+ }
+
+ // Validate regs and clss pairs have same shapes
+ for (const auto ®_to_cls_name : ssd_post_process_config.reg_to_cls_inputs) {
+ CHECK_AS_EXPECTED(inputs_metadata.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: inputs_metadata does not contain reg layer {}", reg_to_cls_name.first);
+ CHECK_AS_EXPECTED(inputs_metadata.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
+ "SSDPostProcessOp: inputs_metadata does not contain cls layer {}", reg_to_cls_name.second);
+ const auto ®_input_metadata = inputs_metadata.at(reg_to_cls_name.first);
+ const auto &cls_input_metadata = inputs_metadata.at(reg_to_cls_name.second);
+ // NOTE: padded shape might be different because features might be different,
+ // and padding is added when width*features % 8 != 0
+ CHECK_AS_EXPECTED((reg_input_metadata.shape.height == cls_input_metadata.shape.height)
+ && (reg_input_metadata.shape.width == cls_input_metadata.shape.width),
+ HAILO_INVALID_ARGUMENT, "SSDPostProcessOp: reg input {} has different shape than cls input {}",
+ reg_to_cls_name.first, reg_to_cls_name.second);
+ }
+ auto op = std::shared_ptr<SSDPostProcessOp>(new (std::nothrow) SSDPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, ssd_post_process_config));
+ CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+ return std::shared_ptr<Op>(std::move(op));
+}
+
+hailo_status SSDPostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
+{
+ CHECK(inputs.size() == m_ssd_config.anchors.size(), HAILO_INVALID_ARGUMENT,
+ "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}",
+ m_ssd_config.anchors.size(), inputs.size());
+
+ std::vector<DetectionBbox> detections;
+ std::vector<uint32_t> classes_detections_count(m_nms_config.classes, 0);
+ detections.reserve(m_nms_config.max_proposals_per_class * m_nms_config.classes);
+ for (const auto ®_to_cls : m_ssd_config.reg_to_cls_inputs) {
+ assert(inputs.count(reg_to_cls.first));
+ assert(inputs.count(reg_to_cls.second));
+ auto status = extract_detections(reg_to_cls.first, reg_to_cls.second,
+ inputs.at(reg_to_cls.first), inputs.at(reg_to_cls.second),
+ detections, classes_detections_count);
+ CHECK_SUCCESS(status);
+ }
+
+ // TODO: Add support for TF_FORMAT_ORDER
+ return hailo_nms_format(std::move(detections), outputs.begin()->second, classes_detections_count);
+}
+
+// Extracts detections from a single reg/cls input pair.
+// The reg buffer carries 4 regressed coordinates per anchor per cell; the cls
+// buffer carries one score per class per anchor per cell. Boxes are decoded
+// against a uniform anchor-center grid and appended to `detections`, with
+// per-class tallies kept in `classes_detections_count`.
+hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_name, const std::string &cls_input_name,
+    const MemoryView &reg_buffer, const MemoryView &cls_buffer,
+    std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+{
+    const auto &reg_shape = m_inputs_metadata[reg_input_name].shape;
+    const auto &reg_padded_shape = m_inputs_metadata[reg_input_name].padded_shape;
+    const auto &cls_padded_shape = m_inputs_metadata[cls_input_name].padded_shape;
+
+    // Configurable row index of each coordinate inside an anchor's entry.
+    const uint32_t X_INDEX = m_ssd_config.tx_index;
+    const uint32_t Y_INDEX = m_ssd_config.ty_index;
+    const uint32_t W_INDEX = m_ssd_config.tw_index;
+    const uint32_t H_INDEX = m_ssd_config.th_index;
+
+    // Each coordinate occupies one (padded) feature row, so its offset from the
+    // entry start is its row index times the padded row width.
+    const uint32_t X_OFFSET = X_INDEX * reg_padded_shape.width;
+    const uint32_t Y_OFFSET = Y_INDEX * reg_padded_shape.width;
+    const uint32_t W_OFFSET = W_INDEX * reg_padded_shape.width;
+    const uint32_t H_OFFSET = H_INDEX * reg_padded_shape.width;
+
+    // Each layer anchors vector is structured as {w,h} pairs.
+    // For example, if we have a vector of size 6 (default SSD vector) then we have 3 anchors for this layer.
+    assert(m_ssd_config.anchors.count(reg_input_name));
+    assert(m_ssd_config.anchors.count(cls_input_name));
+    const auto &layer_anchors = m_ssd_config.anchors[reg_input_name];
+    assert(layer_anchors.size() % 2 == 0);
+    const size_t num_of_anchors = (layer_anchors.size() / 2);
+
+    // Validate reg buffer size: 4 coordinate values per anchor per padded cell.
+    // NOTE(review): this compares an element count against MemoryView::size(),
+    // which is in bytes — exact only for single-byte device types; confirm for
+    // UINT16 inputs (same applies to the cls check below).
+    static const uint32_t reg_entry_size = 4;
+    auto number_of_entries = reg_padded_shape.height * reg_padded_shape.width * num_of_anchors;
+    auto buffer_size = number_of_entries * reg_entry_size;
+    CHECK(buffer_size == reg_buffer.size(), HAILO_INVALID_ARGUMENT,
+        "Failed to extract_detections, reg {} buffer_size should be {}, but is {}", reg_input_name, buffer_size, reg_buffer.size());
+
+    // Validate cls buffer size: one score per class per anchor per padded cell.
+    const uint32_t cls_entry_size = m_nms_config.classes;
+    number_of_entries = cls_padded_shape.height * cls_padded_shape.width * num_of_anchors;
+    buffer_size = number_of_entries * cls_entry_size;
+    CHECK(buffer_size == cls_buffer.size(), HAILO_INVALID_ARGUMENT,
+        "Failed to extract_detections, cls {} buffer_size should be {}, but is {}", cls_input_name, buffer_size, cls_buffer.size());
+
+    // Iterate only the valid (non-padded) grid region.
+    auto reg_row_size = reg_padded_shape.width * reg_padded_shape.features;
+    auto cls_row_size = cls_padded_shape.width * cls_padded_shape.features;
+    for (uint32_t row = 0; row < reg_shape.height; row++) {
+        for (uint32_t col = 0; col < reg_shape.width; col++) {
+            for (uint32_t anchor = 0; anchor < num_of_anchors; anchor++) {
+                // Entry start = full padded rows above + column + this anchor's block of feature rows.
+                auto reg_idx = (reg_row_size * row) + col + ((anchor * reg_entry_size) * reg_padded_shape.width);
+                auto cls_idx = (cls_row_size * row) + col + ((anchor * cls_entry_size) * cls_padded_shape.width);
+                const auto &wa = layer_anchors[anchor * 2];
+                const auto &ha = layer_anchors[anchor * 2 + 1];
+                // Anchor centers form a uniform grid over [0,1] x [0,1], centered in each cell.
+                auto anchor_w_stride = 1.0f / static_cast<float32_t>(reg_shape.width);
+                auto anchor_h_stride = 1.0f / static_cast<float32_t>(reg_shape.height);
+                auto anchor_w_offset = 0.5f * anchor_w_stride;
+                auto anchor_h_offset = 0.5f * anchor_h_stride;
+                auto xcenter_a = static_cast<float32_t>(col) * anchor_w_stride + anchor_w_offset;
+                auto ycenter_a = static_cast<float32_t>(row) * anchor_h_stride + anchor_h_offset;
+                // Decode bboxes, dispatching on the device-side element type.
+                if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_UINT8) {
+                    auto status = extract_bbox_detections<float32_t, uint8_t>(
+                        reg_input_name, cls_input_name,
+                        reg_buffer, cls_buffer,
+                        reg_idx + X_OFFSET,
+                        reg_idx + Y_OFFSET,
+                        reg_idx + W_OFFSET,
+                        reg_idx + H_OFFSET,
+                        cls_idx, wa, ha, xcenter_a, ycenter_a,
+                        detections, classes_detections_count);
+                    CHECK_SUCCESS(status);
+                } else if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_UINT16) {
+                    auto status = extract_bbox_detections<float32_t, uint16_t>(
+                        reg_input_name, cls_input_name,
+                        reg_buffer, cls_buffer,
+                        reg_idx + X_OFFSET,
+                        reg_idx + Y_OFFSET,
+                        reg_idx + W_OFFSET,
+                        reg_idx + H_OFFSET,
+                        cls_idx, wa, ha, xcenter_a, ycenter_a,
+                        detections, classes_detections_count);
+                    CHECK_SUCCESS(status);
+                } else if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_FLOAT32) {
+                    // For testing - TODO: Remove after generator tests are in, and return error.
+                    auto status = extract_bbox_detections<float32_t, float32_t>(
+                        reg_input_name, cls_input_name,
+                        reg_buffer, cls_buffer,
+                        reg_idx + X_OFFSET,
+                        reg_idx + Y_OFFSET,
+                        reg_idx + W_OFFSET,
+                        reg_idx + H_OFFSET,
+                        cls_idx, wa, ha, xcenter_a, ycenter_a,
+                        detections, classes_detections_count);
+                    CHECK_SUCCESS(status);
+                } else {
+                    CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "SSD post-process received invalid reg input type: {}",
+                        m_inputs_metadata[reg_input_name].format.type);
+                }
+            }
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Builds a one-line, human-readable summary of this op: its name, the shared NMS
+// configuration, and the SSD-specific decoding parameters.
+std::string SSDPostProcessOp::get_op_description()
+{
+    return fmt::format("Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}, Centers scales factor: {}, "
+        "Bbox dimension scale factor: {}, Normalize boxes: {}", m_name, get_nms_config_description(),
+        m_ssd_config.image_height, m_ssd_config.image_width, m_ssd_config.centers_scale_factor,
+        m_ssd_config.bbox_dimensions_scale_factor, m_ssd_config.normalize_boxes);
+}
+
+}
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file ssd_post_process.hpp
+ * @brief SSD post process
+ *
+ * Reference code: https://github.com/winfredsu/ssd_postprocessing/blob/master/ssd_postprocessing.py
+ **/
+
+#ifndef _HAILO_SSD_POST_PROCESS_HPP_
+#define _HAILO_SSD_POST_PROCESS_HPP_
+
+#include "net_flow/ops/nms_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+struct SSDPostProcessConfig
+{
+    // The image height.
+    float32_t image_height = 0;
+
+    // The image width.
+    float32_t image_width = 0;
+
+    // Divisor applied to the dequantized tx/ty center regressions before decoding.
+    uint32_t centers_scale_factor = 0;
+
+    // Divisor applied to the dequantized tw/th dimension regressions before decoding.
+    uint32_t bbox_dimensions_scale_factor = 0;
+
+    // Row index of each regressed coordinate within an anchor's entry.
+    uint32_t ty_index = 0;
+    uint32_t tx_index = 0;
+    uint32_t th_index = 0;
+    uint32_t tw_index = 0;
+
+    // Maps each regression ("reg") input name to its matching classification ("cls") input name.
+    std::map<std::string, std::string> reg_to_cls_inputs;
+
+    // A vector of anchors, each element in the vector represents the anchors for a specific layer
+    // Each layer anchors vector is structured as {w,h} pairs.
+    // Each anchor is mapped by 2 keys:
+    // 1. reg input
+    // 2. cls input
+    std::map<std::string, std::vector<float32_t>> anchors;
+
+    // Indicates whether boxes should be normalized (and clipped)
+    bool normalize_boxes = false;
+};
+
+// SSD bbox-decoding and score-filtering post-process op.
+// Operates on pairs of regression ("reg") and classification ("cls") output
+// layers and produces detections in HAILO NMS format (see NmsPostProcessOp).
+// Reference code: https://github.com/winfredsu/ssd_postprocessing/blob/master/ssd_postprocessing.py
+class SSDPostProcessOp : public NmsPostProcessOp
+{
+
+public:
+    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const SSDPostProcessConfig &ssd_post_process_config);
+
+    hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
+    std::string get_op_description() override;
+
+    // Default row index of each regressed coordinate inside an anchor's entry
+    // (default candidates for SSDPostProcessConfig's ty/tx/th/tw_index fields).
+    static const uint32_t DEFAULT_Y_OFFSET_IDX = 0;
+    static const uint32_t DEFAULT_X_OFFSET_IDX = 1;
+    static const uint32_t DEFAULT_H_OFFSET_IDX = 2;
+    static const uint32_t DEFAULT_W_OFFSET_IDX = 3;
+
+private:
+    SSDPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const SSDPostProcessConfig &ssd_post_process_config)
+        : NmsPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, "SSD-Post-Process")
+        , m_ssd_config(ssd_post_process_config)
+    {}
+
+    SSDPostProcessConfig m_ssd_config;
+
+    // Scans the class scores of one decoded box (@a dims_bbox) at @a cls_index and
+    // appends a detection for every class whose score passes the NMS score
+    // threshold (or only the best class when cross-class NMS is enabled).
+    template<typename HostType = float32_t, typename DeviceType>
+    void extract_bbox_classes(const hailo_bbox_float32_t &dims_bbox, DeviceType *cls_data, const BufferMetaData &cls_metadata, uint32_t cls_index,
+        std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+    {
+        if (m_nms_config.cross_classes) {
+            // Pre-NMS optimization. If NMS checks IOU over different classes, only the maximum class is relevant
+            // (class rows start at offset 0 here, and scores are not gated by an objectness factor, hence the 1).
+            auto max_id_score_pair = get_max_class<HostType, DeviceType>(cls_data, cls_index, 0, 1,
+                cls_metadata.quant_info, cls_metadata.padded_shape.width);
+            auto bbox = dims_bbox;
+            bbox.score = max_id_score_pair.second;
+            if (max_id_score_pair.second >= m_nms_config.nms_score_th) {
+                detections.emplace_back(DetectionBbox(bbox, max_id_score_pair.first));
+                classes_detections_count[max_id_score_pair.first]++;
+            }
+        } else {
+            for (uint32_t class_index = 0; class_index < m_nms_config.classes; class_index++) {
+                auto class_id = class_index;
+                if (m_nms_config.background_removal) {
+                    if (m_nms_config.background_removal_index == class_index) {
+                        // Ignore if class_index is background_removal_index
+                        continue;
+                    }
+                    else if (0 == m_nms_config.background_removal_index) {
+                        // background_removal_index will always be the first or last index.
+                        // If it is the first one we need to reduce all classes id's in 1.
+                        // If it is the last one we just ignore it in the previous if case.
+                        class_id--;
+                    }
+                }
+
+                // Class scores are spaced one padded feature row apart within the entry.
+                auto class_entry_idx = cls_index + (class_index * cls_metadata.padded_shape.width);
+                auto class_score = Quantization::dequantize_output<HostType, DeviceType>(cls_data[class_entry_idx],
+                    cls_metadata.quant_info);
+                if (class_score < m_nms_config.nms_score_th) {
+                    continue;
+                }
+                auto bbox = dims_bbox;
+                bbox.score = class_score;
+                detections.emplace_back(bbox, class_id);
+                classes_detections_count[class_id]++;
+            }
+        }
+    }
+
+    // Dequantizes the 4 regression values of one anchor cell, decodes them against
+    // the anchor (@a wa, @a ha) and its grid center (@a xcenter_a, @a ycenter_a),
+    // then delegates per-class extraction to extract_bbox_classes().
+    // Decoding: center moves by t * anchor_dim, dimensions scale by exp(t) * anchor_dim,
+    // after dividing each t by its configured scale factor.
+    template<typename HostType = float32_t, typename DeviceType>
+    hailo_status extract_bbox_detections(const std::string &reg_input_name, const std::string &cls_input_name,
+        const MemoryView &reg_buffer, const MemoryView &cls_buffer,
+        uint64_t x_index, uint64_t y_index, uint64_t w_index, uint64_t h_index,
+        uint32_t cls_index, float32_t wa, float32_t ha, float32_t xcenter_a, float32_t ycenter_a,
+        std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+    {
+        const auto &shape = m_inputs_metadata[reg_input_name].shape;
+        const auto &reg_quant_info = m_inputs_metadata[reg_input_name].quant_info;
+        DeviceType *reg_data = (DeviceType*)reg_buffer.data();
+        auto *cls_data = cls_buffer.data();
+        auto tx = Quantization::dequantize_output<HostType, DeviceType>(reg_data[x_index], reg_quant_info);
+        auto ty = Quantization::dequantize_output<HostType, DeviceType>(reg_data[y_index], reg_quant_info);
+        auto tw = Quantization::dequantize_output<HostType, DeviceType>(reg_data[w_index], reg_quant_info);
+        auto th = Quantization::dequantize_output<HostType, DeviceType>(reg_data[h_index], reg_quant_info);
+        tx /= static_cast<float32_t>(m_ssd_config.centers_scale_factor);
+        ty /= static_cast<float32_t>(m_ssd_config.centers_scale_factor);
+        tw /= static_cast<float32_t>(m_ssd_config.bbox_dimensions_scale_factor);
+        th /= static_cast<float32_t>(m_ssd_config.bbox_dimensions_scale_factor);
+        auto w = exp(tw) * wa;
+        auto h = exp(th) * ha;
+        auto x_center = tx * wa + xcenter_a;
+        auto y_center = ty * ha + ycenter_a;
+        auto x_min = (x_center - (w / 2.0f));
+        auto y_min = (y_center - (h / 2.0f));
+        auto x_max = (x_center + (w / 2.0f));
+        auto y_max = (y_center + (h / 2.0f));
+
+        // TODO: HRT-10033 - Fix support for clip_boxes and normalize_output
+        // Currently `normalize_boxes` is always false
+        if (m_ssd_config.normalize_boxes) {
+            x_min = Quantization::clip(x_min, 0, static_cast<float32_t>(shape.width-1));
+            y_min = Quantization::clip(y_min, 0, static_cast<float32_t>(shape.height-1));
+            x_max = Quantization::clip(x_max, 0, static_cast<float32_t>(shape.width-1));
+            y_max = Quantization::clip(y_max, 0, static_cast<float32_t>(shape.height-1));
+        }
+        // hailo_bbox_float32_t layout: {y_min, x_min, y_max, x_max, score}; the
+        // score is filled per class by extract_bbox_classes().
+        hailo_bbox_float32_t dims_bbox{y_min, x_min, y_max, x_max, 0};
+        const auto &cls_metadata = m_inputs_metadata[cls_input_name];
+        if (cls_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
+            extract_bbox_classes<HostType, uint8_t>(dims_bbox, (uint8_t*)cls_data, m_inputs_metadata[cls_input_name],
+                cls_index, detections, classes_detections_count);
+        } else if (cls_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
+            extract_bbox_classes<HostType, uint16_t>(dims_bbox, (uint16_t*)cls_data, m_inputs_metadata[cls_input_name],
+                cls_index, detections, classes_detections_count);
+        } else if (cls_metadata.format.type == HAILO_FORMAT_TYPE_FLOAT32) {
+            extract_bbox_classes<HostType, float32_t>(dims_bbox, (float32_t*)cls_data, m_inputs_metadata[cls_input_name],
+                cls_index, detections, classes_detections_count);
+        } else {
+            CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "SSD post-process received invalid cls input type: {}",
+                m_inputs_metadata[cls_input_name].format.type);
+        }
+        return HAILO_SUCCESS;
+    }
+
+    /**
+     * Extract bboxes with confidence level higher than @a confidence_threshold from @a reg_buffer / @a cls_buffer and add them to @a detections.
+     *
+     * @param[in] reg_input_name Name of the regression input
+     * @param[in] cls_input_name Name of the classes input
+     * @param[in] reg_buffer Buffer containing the boxes data after inference
+     * @param[in] cls_buffer Buffer containing the classes ids after inference.
+     * @param[inout] detections A vector of ::DetectionBbox objects, to add the detected bboxes to.
+     * @param[inout] classes_detections_count A vector of uint32_t, incremented with the count of detections per class.
+     *
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     */
+    hailo_status extract_detections(const std::string &reg_input_name, const std::string &cls_input_name,
+        const MemoryView &reg_buffer, const MemoryView &cls_buffer,
+        std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count);
+};
+
+
+}
+
+}
+
+#endif // _HAILO_SSD_POST_PROCESS_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolo_post_process.cpp
+ * @brief YOLO post process
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
+ **/
+
+#include "net_flow/ops/yolo_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+// Creates a YOLOv5 post-process op after validating that every input layer is in
+// NHCW format (the extraction loop indexes each entry field as a padded feature row).
+Expected<std::shared_ptr<Op>> YOLOv5PostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+    const std::map<std::string, BufferMetaData> &outputs_metadata,
+    const NmsPostProcessConfig &nms_post_process_config,
+    const YoloPostProcessConfig &yolo_post_process_config)
+{
+    for (auto &name_to_inputs_metadata : inputs_metadata) {
+        CHECK_AS_EXPECTED(name_to_inputs_metadata.second.format.order == HAILO_FORMAT_ORDER_NHCW, HAILO_INVALID_ARGUMENT,
+            "YOLOv5PostProcessOp: Unexpected input format {}", name_to_inputs_metadata.second.format.order);
+    }
+    // nothrow new: allocation failure is reported as a status, not an exception.
+    auto op = std::shared_ptr<YOLOv5PostProcessOp>(new (std::nothrow) YOLOv5PostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config));
+    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+    return std::shared_ptr<Op>(std::move(op));
+}
+
+// Creates a YOLOX post-process op.
+// YOLOX layers carry no real anchors, so a unit {w,h} = {1,1} anchor is
+// registered for every input, letting the shared YOLO extraction loop and
+// decode() (which multiplies by wa/ha) run unchanged.
+Expected<std::shared_ptr<Op>> YOLOXPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+    const std::map<std::string, BufferMetaData> &outputs_metadata,
+    const NmsPostProcessConfig &nms_post_process_config,
+    const YoloPostProcessConfig &yolo_post_process_config)
+{
+    for (auto &name_to_inputs_metadata : inputs_metadata) {
+        // Fixed copy-paste: the message previously named YOLOv5PostProcessOp.
+        CHECK_AS_EXPECTED(name_to_inputs_metadata.second.format.order == HAILO_FORMAT_ORDER_NHCW, HAILO_INVALID_ARGUMENT,
+            "YOLOXPostProcessOp: Unexpected input format {}", name_to_inputs_metadata.second.format.order);
+    }
+    // Register a dummy unit anchor per input layer.
+    auto modified_yolo_post_process_config = yolo_post_process_config;
+    for (auto &name_to_meta : inputs_metadata) {
+        std::vector<int> anchors = {1, 1};
+        modified_yolo_post_process_config.anchors.insert({name_to_meta.first, anchors});
+    }
+    // nothrow new: allocation failure is reported as a status, not an exception.
+    auto op = std::shared_ptr<YOLOXPostProcessOp>(new (std::nothrow) YOLOXPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config,
+        modified_yolo_post_process_config));
+    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+    return std::shared_ptr<Op>(std::move(op));
+}
+
+// Runs the YOLO post-process (shared by YOLOv5 and YOLOX): extracts detections
+// from every input layer using that layer's anchors, then serializes them into
+// the single output buffer in HAILO NMS format.
+hailo_status YOLOPostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
+{
+    CHECK(inputs.size() == m_yolo_config.anchors.size(), HAILO_INVALID_ARGUMENT,
+        "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}",
+        m_yolo_config.anchors.size(), inputs.size());
+
+    std::vector<DetectionBbox> detections;
+    std::vector<uint32_t> classes_detections_count(m_nms_config.classes, 0);
+    // Reserve for the worst case: every class fills its per-class proposal quota.
+    detections.reserve(m_nms_config.max_proposals_per_class * m_nms_config.classes);
+    for (const auto &name_to_input : inputs) {
+        // Initialized so the variable is never read uninitialized on any path.
+        hailo_status status = HAILO_UNINITIALIZED;
+        auto &name = name_to_input.first;
+        auto &input_metadata = m_inputs_metadata[name];
+        // Dispatch on the device-side element type of this input layer.
+        if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
+            status = extract_detections<float32_t, uint8_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, m_yolo_config.anchors[name], detections, classes_detections_count);
+        } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
+            status = extract_detections<float32_t, uint16_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, m_yolo_config.anchors[name], detections, classes_detections_count);
+        } else {
+            // This base op serves both YOLOv5 and YOLOX, so the message must not name v5 only.
+            CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "YOLO post-process received invalid input type");
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    // TODO: Add support for TF_FORMAT_ORDER
+    return hailo_nms_format(std::move(detections), outputs.begin()->second, classes_detections_count);
+}
+
+// Builds a one-line, human-readable summary of this op: its name, the shared NMS
+// configuration, and the input image dimensions.
+std::string YOLOPostProcessOp::get_op_description()
+{
+    return fmt::format("Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}",
+        m_name, get_nms_config_description(), m_yolo_config.image_height, m_yolo_config.image_width);
+}
+
+// Decodes one YOLOv5 box from its (already dequantized) network outputs, per the
+// ultralytics formulation (see the links in yolo_post_process.hpp):
+// w/h = (2*t)^2 * anchor, normalized by the image dimensions; the center is the
+// cell index plus a [-0.5, 1.5) offset, normalized by the grid stride.
+// The returned box is {y_min, x_min, y_max, x_max, score=0}.
+hailo_bbox_float32_t YOLOv5PostProcessOp::decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+    int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const
+{
+    auto w = pow(2.0f * tw, 2.0f) * static_cast<float32_t>(wa) / m_yolo_config.image_width;
+    auto h = pow(2.0f * th, 2.0f) * static_cast<float32_t>(ha) / m_yolo_config.image_height;
+    auto x_center = (tx * 2.0f - 0.5f + static_cast<float32_t>(col)) / static_cast<float32_t>(w_stride);
+    auto y_center = (ty * 2.0f - 0.5f + static_cast<float32_t>(row)) / static_cast<float32_t>(h_stride);
+    auto x_min = (x_center - (w / 2.0f));
+    auto y_min = (y_center - (h / 2.0f));
+    return hailo_bbox_float32_t{y_min, x_min, (y_min+h), (x_min+w), 0};
+}
+
+// Decodes one YOLOX box from its (already dequantized) network outputs.
+// The center is the raw offset plus the cell index, divided by the grid stride;
+// w/h = exp(t) * anchor (a unit {1,1} anchor for YOLOX — see
+// YOLOXPostProcessOp::create), normalized by the image dimensions.
+// The returned box is {y_min, x_min, y_max, x_max, score=0}.
+hailo_bbox_float32_t YOLOXPostProcessOp::decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+    int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const
+{
+    auto w = exp(tw) * static_cast<float32_t>(wa) / m_yolo_config.image_width;
+    auto h = exp(th) * static_cast<float32_t>(ha) / m_yolo_config.image_height;
+    auto x_center = (tx + static_cast<float32_t>(col)) / static_cast<float32_t>(w_stride);
+    auto y_center = (ty + static_cast<float32_t>(row)) / static_cast<float32_t>(h_stride);
+    auto x_min = (x_center - (w / 2.0f));
+    auto y_min = (y_center - (h / 2.0f));
+    return hailo_bbox_float32_t{y_min, x_min, (y_min+h), (x_min+w), 0};
+}
+
+} // namespace net_flow
+} // namespace hailort
+
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolo_post_process.hpp
+ * @brief YOLO post process
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
+ **/
+
+#ifndef _HAILO_YOLO_POST_PROCESS_HPP_
+#define _HAILO_YOLO_POST_PROCESS_HPP_
+
+#include "net_flow/ops/nms_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+struct YoloPostProcessConfig
+{
+    // The image height.
+    float32_t image_height = 0;
+
+    // The image width.
+    float32_t image_width = 0;
+
+    // A vector of anchors, each element in the vector represents the anchors for a
+    // specific layer, keyed by that layer's input name.
+    // Each layer anchors vector is structured as {w,h} pairs.
+    std::map<std::string, std::vector<int>> anchors;
+};
+
+
+// Shared base for YOLO-family post-process ops (YOLOv5, YOLOX).
+// Implements detection extraction over NHCW-packed layers; subclasses supply the
+// flavor-specific bbox decoding via decode().
+class YOLOPostProcessOp : public NmsPostProcessOp
+{
+public:
+    hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
+    std::string get_op_description() override;
+
+protected:
+    // Decodes one box from its (already dequantized) regression outputs.
+    // @a wa/@a ha are the anchor dimensions, @a col/@a row the grid cell, and
+    // @a w_stride/@a h_stride the grid dimensions.
+    virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const = 0;
+
+    YOLOPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const YoloPostProcessConfig &yolo_post_process_config,
+        const std::string &name)
+        : NmsPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, name)
+        , m_yolo_config(yolo_post_process_config)
+    {}
+
+    YoloPostProcessConfig m_yolo_config;
+
+private:
+    /**
+     * Extract bboxes with confidence level higher than @a confidence_threshold from @a buffer and add them to @a detections.
+     *
+     * @param[in] buffer Buffer containing data after inference
+     * @param[in] quant_info Quantization info corresponding to the @a buffer layer.
+     * @param[in] shape Shape corresponding to the @a buffer layer.
+     * @param[in] padded_shape Padded shape corresponding to the @a buffer layer.
+     * @param[in] layer_anchors The layer anchors corresponding to layer receiving the @a buffer.
+     *  Each anchor is structured as {width, height} pairs.
+     * @param[inout] detections A vector of ::DetectionBbox objects, to add the detected bboxes to.
+     * @param[inout] classes_detections_count A vector of uint32_t, incremented with the count of detections per class.
+     *
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     */
+    template<typename HostType = float32_t, typename DeviceType>
+    hailo_status extract_detections(const MemoryView &buffer, hailo_quant_info_t quant_info,
+        hailo_3d_image_shape_t shape, hailo_3d_image_shape_t padded_shape,
+        const std::vector<int> &layer_anchors, std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+    {
+        // Fixed layout of a YOLO entry: 4 box coordinates, objectness, then one
+        // score per class. Each field occupies one (padded) feature row.
+        static const uint32_t X_INDEX = 0;
+        static const uint32_t Y_INDEX = 1;
+        static const uint32_t W_INDEX = 2;
+        static const uint32_t H_INDEX = 3;
+        static const uint32_t OBJECTNESS_INDEX = 4;
+        static const uint32_t CLASSES_START_INDEX = 5;
+
+        const uint32_t X_OFFSET = X_INDEX * padded_shape.width;
+        const uint32_t Y_OFFSET = Y_INDEX * padded_shape.width;
+        const uint32_t W_OFFSET = W_INDEX * padded_shape.width;
+        const uint32_t H_OFFSET = H_INDEX * padded_shape.width;
+        const uint32_t OBJECTNESS_OFFSET = OBJECTNESS_INDEX * padded_shape.width;
+
+        // Each layer anchors vector is structured as {w,h} pairs.
+        // For example, if we have a vector of size 6 (default YOLOv5 vector) then we have 3 anchors for this layer.
+        assert(layer_anchors.size() % 2 == 0);
+        const size_t num_of_anchors = (layer_anchors.size() / 2);
+
+        // NOTE(review): entry_size is in *bytes* (it includes sizeof(DeviceType)).
+        // That is consistent with the byte-size validation below, but it is also
+        // used in the element-index math for entry_idx; for DeviceType wider than
+        // one byte the anchor offset looks doubled — confirm (compare the SSD op,
+        // where entry sizes are element counts).
+        uint32_t entry_size = (uint32_t)((CLASSES_START_INDEX + m_nms_config.classes) * sizeof(DeviceType));
+        auto number_of_entries = padded_shape.height * padded_shape.width * num_of_anchors;
+        // TODO: this can also be part of the Op configuration
+        auto buffer_size = number_of_entries * entry_size;
+        CHECK(buffer_size == buffer.size(), HAILO_INVALID_ARGUMENT,
+            "Failed to extract_detections, buffer_size should be {}, but is {}", buffer_size, buffer.size());
+
+        // Iterate only the valid (non-padded) grid region.
+        auto row_size = padded_shape.width * padded_shape.features;
+        DeviceType *data = (DeviceType*)buffer.data();
+        for (uint32_t row = 0; row < shape.height; row++) {
+            for (uint32_t col = 0; col < shape.width; col++) {
+                for (uint32_t anchor = 0; anchor < num_of_anchors; anchor++) {
+                    auto entry_idx = (row_size * row) + col + ((anchor * entry_size) * padded_shape.width);
+
+                    // Early-out: a box cannot pass the score threshold if its
+                    // objectness alone is already below it.
+                    auto objectness = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + OBJECTNESS_OFFSET], quant_info);
+                    if (objectness < m_nms_config.nms_score_th) {
+                        continue;
+                    }
+
+                    auto tx = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + X_OFFSET], quant_info);
+                    auto ty = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + Y_OFFSET], quant_info);
+                    auto tw = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + W_OFFSET], quant_info);
+                    auto th = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + H_OFFSET], quant_info);
+                    auto bbox = decode(tx, ty, tw, th, layer_anchors[anchor * 2], layer_anchors[anchor * 2 + 1], col, row,
+                        shape.width, shape.height);
+
+                    // Source for the calculations - https://github.com/ultralytics/yolov5/blob/HEAD/models/yolo.py
+                    // Explanations for the calculations - https://github.com/ultralytics/yolov5/issues/471
+                    if (m_nms_config.cross_classes) {
+                        // Pre-NMS optimization. If NMS checks IOU over different classes, only the maximum class is relevant
+                        auto max_id_score_pair = get_max_class<HostType, DeviceType>(data, entry_idx, CLASSES_START_INDEX, objectness, quant_info, padded_shape.width);
+                        bbox.score = max_id_score_pair.second;
+                        if (max_id_score_pair.second >= m_nms_config.nms_score_th) {
+                            detections.emplace_back(DetectionBbox(bbox, max_id_score_pair.first));
+                            classes_detections_count[max_id_score_pair.first]++;
+                        }
+                    }
+                    else {
+                        // Final score per class is class confidence gated by objectness.
+                        for (uint32_t class_index = 0; class_index < m_nms_config.classes; class_index++) {
+                            auto class_entry_idx = entry_idx + ((CLASSES_START_INDEX + class_index) * padded_shape.width);
+                            auto class_confidence = Quantization::dequantize_output<HostType, DeviceType>(
+                                data[class_entry_idx], quant_info);
+                            auto class_score = class_confidence * objectness;
+                            if (class_score >= m_nms_config.nms_score_th) {
+                                bbox.score = class_score;
+                                detections.emplace_back(DetectionBbox(bbox, class_index));
+                                classes_detections_count[class_index]++;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        return HAILO_SUCCESS;
+    }
+};
+
+// YOLOv5 post-process op: pairs the shared YOLO extraction loop with the
+// ultralytics YOLOv5 box decoding (see decode() in yolo_post_process.cpp).
+class YOLOv5PostProcessOp : public YOLOPostProcessOp
+{
+public:
+    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const YoloPostProcessConfig &yolo_post_process_config);
+
+protected:
+    // YOLOv5-flavor bbox decoding; see YOLOPostProcessOp::decode for the contract.
+    virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const override;
+
+private:
+    YOLOv5PostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const YoloPostProcessConfig &yolo_post_process_config)
+        : YOLOPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config, "YOLOv5-Post-Process")
+    {}
+};
+
+// YOLOX post-process op: reuses the shared YOLO extraction loop with YOLOX-flavor
+// decoding. YOLOX has no real anchors; create() injects unit {1,1} anchors.
+class YOLOXPostProcessOp : public YOLOPostProcessOp
+{
+public:
+    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const YoloPostProcessConfig &yolo_post_process_config);
+
+protected:
+    // YOLOX-flavor bbox decoding; see YOLOPostProcessOp::decode for the contract.
+    virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const override;
+
+private:
+    YOLOXPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
+        const std::map<std::string, BufferMetaData> &outputs_metadata,
+        const NmsPostProcessConfig &nms_post_process_config,
+        const YoloPostProcessConfig &yolo_post_process_config)
+        : YOLOPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config, "YOLOX-Post-Process")
+    {}
+};
+
+} // namespace net_flow
+} // namespace hailort
+
+#endif // _HAILO_YOLO_POST_PROCESS_HPP_
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file yolo_post_processing.hpp
- * @brief YOLO post processing
- *
- * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
- * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
- **/
-
-#ifndef _HAILO_YOLO_POST_PROCESSING_HPP_
-#define _HAILO_YOLO_POST_PROCESSING_HPP_
-
-#include "hailo/hailort.hpp"
-
-namespace hailort
-{
-namespace net_flow
-{
-
-static const float32_t REMOVED_CLASS_SCORE = 0.0f;
-
-struct DetectionBbox
-{
- DetectionBbox(float32_t x_min, float32_t y_min, float32_t width, float32_t height, float32_t score, uint32_t class_id) :
- m_class_id(class_id), m_bbox{y_min, x_min, (y_min + height), (x_min + width), score} {}
-
- uint32_t m_class_id;
- hailo_bbox_float32_t m_bbox;
-};
-
-/**
- * Computes the value of the sigmoid function on @a x input: f(x) = 1/(1 + e^-x)
-*/
-inline float32_t sigmoid(float32_t x)
-{
- return 1.0f / (1.0f + expf(-x));
-}
-
-// TODO: Maybe change to dequantize entry and add @a should_sigmoid to dequantize_output_buffer in `quantization.hpp`.
-// Its an API addition but does not break anything.
-template<typename HostType = float32_t, typename DeviceType>
-HostType apply_dequantization_activation(DeviceType value, hailo_quant_info_t quant_info, bool should_sigmoid)
-{
- auto dequantized_val = Quantization::dequantize_output<HostType, DeviceType>(value, quant_info);
-
- if (should_sigmoid) {
- return sigmoid(dequantized_val);
- } else {
- return dequantized_val;
- }
-}
-
-class YOLOv5PostProcessingOp
-{
-public:
-
- /**
- * @param[in] anchors A vector of anchors, each element in the vector represents the anchors for a specific layer.
- * @param[in] image_height The image height.
- * @param[in] image_width The image width.
- * @param[in] confidence_threshold User given confidence threshold for a bbox. A bbox will be consider as detection if the
- * (objectness * class_score) is higher then the confidence_threshold.
- * @param[in] iou_threshold User given IOU threshold (intersection over union). This threshold is for performing
- * Non-maximum suppression (Removing overlapping boxes).
- * @param[in] num_of_classes The model's number of classes. (This depends on the dataset that the model trained on).
- * @param[in] should_dequantize Indicates whether the post-processing function should de-quantize the tensors data.
- * @param[in] max_bboxes_per_class Maximum amount of bboxes per nms class.
- * @param[in] should_sigmoid Indicates whether sigmoid() function should be performed on the @a tensors' data.
- * @param[in] one_class_per_bbox Indicates whether the post-processing function should return only one class per detected bbox.
- * If set to flase - Two different classes can have the same bbox.
- *
- * @return Upon success, returns a vector of detection objects. Otherwise, returns Unexpected of ::hailo_status error.
- * TODO: For integrating with SDK Json - consider changing anchors vector to a vector of w,h pairs.
- * HRT-8526 - Add post-processing support for quantized data
- */
- static Expected<YOLOv5PostProcessingOp> create(const std::vector<std::vector<int>> &anchors,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos, float32_t image_height, float32_t image_width,
- float32_t confidence_threshold, float32_t iou_threshold, uint32_t num_of_classes, bool should_dequantize,
- uint32_t max_bboxes_per_class, bool should_sigmoid, bool one_class_per_bbox=true)
- {
- return YOLOv5PostProcessingOp(anchors, shapes, formats, quant_infos, image_height, image_width, confidence_threshold, iou_threshold,
- num_of_classes, should_dequantize, max_bboxes_per_class, should_sigmoid, one_class_per_bbox);
- }
-
- /**
- * Execute YOLOv5 post-processing on inferred data.
- * @a HostType can be uint16 or float32.
- * TODO: HRT-8525 - Add support for these types. Currently we support only in: @a HostType = float32_t
- *
- * @param[in] tensors A vector of the input buffers for the post-processing,
- * the buffer's shape and the quantization info.
- * NOTE: The Order of the @a tensors vector should be corresponding to the order of @a anchors vector given in the creation of YOLOv5PostProcessingOp.
- *
- * @return Upon success, returns a buffer containing the detection objects, in ::HAILO_FORMAT_ORDER_HAILO_NMS format.
- * Otherwise, returns Unexpected of ::hailo_status error.
- */
- template<typename HostType = float32_t>
- hailo_status execute(const std::vector<MemoryView> &tensors, MemoryView dst_view)
- {
- CHECK(tensors.size() == m_anchors.size(), HAILO_INVALID_ARGUMENT,
- "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}", m_anchors.size(), tensors.size());
-
- std::vector<DetectionBbox> detections;
- std::vector<uint32_t> classes_detections_count(m_num_of_classes, 0);
- detections.reserve(m_max_bboxes_per_class * m_num_of_classes);
- for (size_t i = 0; i < tensors.size(); i++) {
- hailo_status status;
- if (m_formants[i].type == HAILO_FORMAT_TYPE_UINT8) {
- status = extract_detections<HostType, uint8_t>(tensors[i], m_quant_infos[i], m_shapes[i],
- m_anchors[i], detections, classes_detections_count);
- } else if (m_formants[i].type == HAILO_FORMAT_TYPE_UINT16) {
- status = extract_detections<HostType, uint16_t>(tensors[i], m_quant_infos[i], m_shapes[i],
- m_anchors[i], detections, classes_detections_count);
- } else {
- CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "YOLOv5 post-process received invalid input type");
- }
- CHECK_SUCCESS(status);
- }
-
- // TODO: Add support for TF_FORMAT_ORDER
- return hailo_nms_format(std::move(detections), dst_view, classes_detections_count);
- }
-
-private:
- YOLOv5PostProcessingOp(const std::vector<std::vector<int>> &anchors, const std::vector<hailo_3d_image_shape_t> &shapes,
- const std::vector<hailo_format_t> &formats, const std::vector<hailo_quant_info_t> &quant_infos, float32_t image_height, float32_t image_width,
- float32_t confidence_threshold, float32_t iou_threshold, uint32_t num_of_classes, bool should_dequantize, uint32_t max_bboxes_per_class, bool should_sigmoid, bool one_class_per_bbox) :
- m_anchors(anchors), m_shapes(shapes), m_formants(formats), m_quant_infos(quant_infos), m_image_height(image_height), m_image_width(image_width),
- m_confidence_threshold(confidence_threshold), m_iou_threshold(iou_threshold), m_num_of_classes(num_of_classes),
- m_should_dequantize(should_dequantize), m_max_bboxes_per_class(max_bboxes_per_class), m_should_sigmoid(should_sigmoid),
- m_one_class_per_bbox(one_class_per_bbox)
- {
- (void)m_should_dequantize;
- }
-
- template<typename HostType = float32_t, typename DeviceType>
- std::pair<uint32_t, float32_t> get_max_class(const uint8_t *data, size_t entry_classes_idx, float32_t objectness, hailo_quant_info_t quant_info)
- {
- std::pair<uint32_t, float32_t> max_id_score_pair;
- for (uint32_t class_index = 0; class_index < m_num_of_classes; class_index++) {
- auto class_confidence = apply_dequantization_activation<HostType, DeviceType>(data[entry_classes_idx + class_index], quant_info, m_should_sigmoid);
- auto class_score = class_confidence * objectness;
- if (class_score > max_id_score_pair.second) {
- max_id_score_pair.first = class_index;
- max_id_score_pair.second = class_score;
- }
- }
- return max_id_score_pair;
- }
-
- /**
- * Extract bboxes with confidence level higher then @a confidence_threshold from @a buffer and add them to @a detections.
- *
- * @param[in] buffer Buffer containing data after inference and
- * @param[in] quant_info Quantization info corresponding to the @a buffer layer.
- * @param[in] shape Shape corresponding to the @a buffer layer.
- * @param[in] image_height The image height.
- * @param[in] image_width The image width.
- * @param[in] layer_anchors The layer anchors corresponding to layer receiving the @a buffer.
- * Each anchor is structured as {width, height} pairs.
- * @param[in] confidence_threshold User given confidence threshold for a bbox. A bbox will be consider as detection if the
- * (objectness * class_score) is higher then the confidence_threshold.
- * @param[in] num_of_classes The model's number of classes.
- * @param[in] should_sigmoid Indicates whether sigmoid() function should be performed on the @a buffer's data.
- * @param[inout] detections A vector of ::DetectionBbox objects, to add the detected bboxes to.
- *
- * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
- */
- template<typename HostType = float32_t, typename DeviceType>
- hailo_status extract_detections(const MemoryView &buffer, hailo_quant_info_t quant_info, hailo_3d_image_shape_t shape,
- const std::vector<int> &layer_anchors, std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
- {
- static const uint32_t X_INDEX = 0;
- static const uint32_t Y_INDEX = 1;
- static const uint32_t W_INDEX = 2;
- static const uint32_t H_INDEX = 3;
- static const uint32_t OBJECTNESS_INDEX = 4;
- static const uint32_t CLASSES_START_INDEX = 5;
-
- // Each layer anchors vector is structured as {w,h} pairs.
- // For example, if we have a vector of size 6 (default YOLOv5 vector) then we have 3 anchors for this layer.
- assert(layer_anchors.size() % 2 == 0);
- const size_t num_of_anchors = (layer_anchors.size() / 2);
-
- const uint32_t entry_size = CLASSES_START_INDEX + m_num_of_classes;
- auto number_of_entries = shape.height * shape.width * num_of_anchors;
- // TODO: this can also be part of the Op configuration
- auto buffer_size = number_of_entries * entry_size;
- CHECK(buffer_size == buffer.size(), HAILO_INVALID_ARGUMENT,
- "Failed to extract_detections, buffer_size should be {}, but is {}", buffer_size, buffer.size());
-
- auto *data = buffer.data();
- for (size_t row = 0; row < shape.height; row++) {
- for (size_t col = 0; col < shape.width; col++) {
- for (size_t anchor = 0; anchor < num_of_anchors; anchor++) {
- auto entry_idx = entry_size * (num_of_anchors * (shape.height * row + col) + anchor);
-
- auto objectness = apply_dequantization_activation<HostType, DeviceType>(data[entry_idx + OBJECTNESS_INDEX], quant_info, m_should_sigmoid);
- if (objectness < m_confidence_threshold) {
- continue;
- }
-
- auto tx = apply_dequantization_activation<HostType, DeviceType>(data[entry_idx + X_INDEX], quant_info, m_should_sigmoid);
- auto ty = apply_dequantization_activation<HostType, DeviceType>(data[entry_idx + Y_INDEX], quant_info, m_should_sigmoid);
- auto tw = apply_dequantization_activation<HostType, DeviceType>(data[entry_idx + W_INDEX], quant_info, m_should_sigmoid);
- auto th = apply_dequantization_activation<HostType, DeviceType>(data[entry_idx + H_INDEX], quant_info, m_should_sigmoid);
-
- // Source for the calculations - https://github.com/ultralytics/yolov5/blob/HEAD/models/yolo.py
- // Explanations for the calculations - https://github.com/ultralytics/yolov5/issues/471
- auto w = pow(2.0f * tw, 2.0f) * static_cast<float32_t>(layer_anchors[anchor * 2]) / m_image_width;
- auto h = pow(2.0f * th, 2.0f) * static_cast<float32_t>(layer_anchors[anchor * 2 + 1]) / m_image_height;
- auto x_center = (tx * 2.0f - 0.5f + static_cast<float32_t>(col)) / static_cast<float32_t>(shape.width);
- auto y_center = (ty * 2.0f - 0.5f + static_cast<float32_t>(row)) / static_cast<float32_t>(shape.height);
- auto x_min = (x_center - (w / 2.0f));
- auto y_min = (y_center - (h / 2.0f));
-
- if (m_one_class_per_bbox) {
- auto entry_classes_idx = entry_idx + CLASSES_START_INDEX;
- auto max_id_score_pair = get_max_class<HostType, DeviceType>(data, entry_classes_idx , objectness, quant_info);
- if (max_id_score_pair.second >= m_confidence_threshold) {
- detections.emplace_back(x_min, y_min, w, h, max_id_score_pair.second, max_id_score_pair.first);
- classes_detections_count[max_id_score_pair.first]++;
- }
- }
- else {
- for (uint32_t class_index = 0; class_index < m_num_of_classes; class_index++) {
- auto class_confidence = apply_dequantization_activation<HostType, DeviceType>(
- data[entry_idx + CLASSES_START_INDEX + class_index], quant_info, m_should_sigmoid);
- auto class_score = class_confidence * objectness;
- if (class_score >= m_confidence_threshold) {
- detections.emplace_back(x_min, y_min, w, h, class_score, class_index);
- classes_detections_count[class_index]++;
- }
- }
- }
- }
- }
- }
-
- return HAILO_SUCCESS;
- }
-
- /**
- * Computes the IOU ratio of @a box_1 and @a box_2
- */
- float compute_iou(const DetectionBbox &box_1, const DetectionBbox &box_2)
- {
- const float overlap_area_width = std::min(box_1.m_bbox.x_max, box_2.m_bbox.x_max) - std::max(box_1.m_bbox.x_min, box_2.m_bbox.x_min);
- const float overlap_area_height = std::min(box_1.m_bbox.y_max, box_2.m_bbox.y_max) - std::max(box_1.m_bbox.y_min, box_2.m_bbox.y_min);
- if (overlap_area_width <= 0.0f || overlap_area_height <= 0.0f) {
- return 0.0f;
- }
- const float intersection = overlap_area_width * overlap_area_height;
- const float box_1_area = (box_1.m_bbox.y_max - box_1.m_bbox.y_min) * (box_1.m_bbox.x_max - box_1.m_bbox.x_min);
- const float box_2_area = (box_2.m_bbox.y_max - box_2.m_bbox.y_min) * (box_2.m_bbox.x_max - box_2.m_bbox.x_min);
- const float union_area = (box_1_area + box_2_area - intersection);
-
- return (intersection / union_area);
- }
-
- /**
- * Removes overlapping boxes in @a detections by setting the class confidence to zero.
- *
- * @param[in] detections A vector of @a DetectionBbox containing the detections boxes after ::extract_detections() function.
- *
- */
- void remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
- {
- std::sort(detections.begin(), detections.end(),
- [](DetectionBbox a, DetectionBbox b)
- { return a.m_bbox.score > b.m_bbox.score; });
-
- for (size_t i = 0; i < detections.size(); i++) {
- if (detections[i].m_bbox.score == REMOVED_CLASS_SCORE) {
- // Detection overlapped with a higher score detection
- continue;
- }
-
- for (size_t j = i + 1; j < detections.size(); j++) {
- if (detections[j].m_bbox.score == REMOVED_CLASS_SCORE) {
- // Detection overlapped with a higher score detection
- continue;
- }
-
- if ((detections[i].m_class_id == detections[j].m_class_id) &&
- (compute_iou(detections[i], detections[j]) >= m_iou_threshold)) {
- // Remove detections[j] if the iou is higher then the threshold
- detections[j].m_bbox.score = REMOVED_CLASS_SCORE;
- assert(classes_detections_count[detections[j].m_class_id] > 0);
- classes_detections_count[detections[j].m_class_id]--;
- }
- }
- }
- }
-
- /*
- * For each class the layout is
- * \code
- * struct (packed) {
- * uint16_t/float32_t bbox_count;
- * hailo_bbox_t/hailo_bbox_float32_t bbox[bbox_count];
- * };
- * \endcode
- */
- void fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
- std::vector<uint32_t> &classes_detections_count)
- {
- // Calculate the number of detections before each class, to help us later calculate the buffer_offset for it's detections.
- std::vector<uint32_t> num_of_detections_before;
- num_of_detections_before.reserve(m_num_of_classes);
- uint32_t ignored_detections_count = 0;
- for (size_t class_idx = 0; class_idx < m_num_of_classes; class_idx++) {
- if (classes_detections_count[class_idx] > m_max_bboxes_per_class) {
- ignored_detections_count += (classes_detections_count[class_idx] - m_max_bboxes_per_class);
- classes_detections_count[class_idx] = m_max_bboxes_per_class;
- }
-
- if (0 == class_idx) {
- num_of_detections_before[class_idx] = 0;
- }
- else {
- num_of_detections_before[class_idx] = num_of_detections_before[class_idx - 1] + classes_detections_count[class_idx - 1];
- }
-
- // Fill `bbox_count` value for class_idx in the result buffer
- float32_t bbox_count_casted = static_cast<float32_t>(classes_detections_count[class_idx]);
- auto buffer_offset = (class_idx * sizeof(bbox_count_casted)) + (num_of_detections_before[class_idx] * sizeof(hailo_bbox_float32_t));
- memcpy((buffer.data() + buffer_offset), &bbox_count_casted, sizeof(bbox_count_casted));
- }
-
- for (auto &detection : detections) {
- if (REMOVED_CLASS_SCORE == detection.m_bbox.score) {
- // Detection overlapped with a higher score detection and removed in remove_overlapping_boxes()
- continue;
- }
- if (0 == classes_detections_count[detection.m_class_id]) {
- // This class' detections count is higher then m_max_bboxes_per_class.
- // This detection is ignored due to having lower score (detections vector is sorted by score).
- continue;
- }
-
- auto buffer_offset = ((detection.m_class_id + 1) * sizeof(float32_t))
- + (num_of_detections_before[detection.m_class_id] * sizeof(hailo_bbox_float32_t));
-
- assert((buffer_offset + sizeof(hailo_bbox_float32_t)) <= buffer.size());
- memcpy((hailo_bbox_float32_t*)(buffer.data() + buffer_offset), &detection.m_bbox, sizeof(hailo_bbox_float32_t));
- num_of_detections_before[detection.m_class_id]++;
- classes_detections_count[detection.m_class_id]--;
- }
-
- if (0 != ignored_detections_count) {
- LOGGER__INFO("{} Detections were ignored, due to `max_bboxes_per_class` defined as {}.",
- ignored_detections_count, m_max_bboxes_per_class);
- }
- }
-
- hailo_status hailo_nms_format(std::vector<DetectionBbox> &&detections, MemoryView dst_view, std::vector<uint32_t> &classes_detections_count)
- {
- remove_overlapping_boxes(detections, classes_detections_count);
- fill_nms_format_buffer(dst_view, detections, classes_detections_count);
- return HAILO_SUCCESS;
- }
-
- std::vector<std::vector<int>> m_anchors;
- std::vector<hailo_3d_image_shape_t> m_shapes;
- std::vector<hailo_format_t> m_formants;
- std::vector<hailo_quant_info_t> m_quant_infos;
- float32_t m_image_height;
- float32_t m_image_width;
- float32_t m_confidence_threshold;
- float32_t m_iou_threshold;
- uint32_t m_num_of_classes;
- bool m_should_dequantize;
- uint32_t m_max_bboxes_per_class;
- bool m_should_sigmoid;
- bool m_one_class_per_bbox;
-};
-
-} /* namespace net_flow */
-} /* namespace hailort */
-
-#endif /* _HAILO_YOLO_POST_PROCESSING_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file inference_pipeline.cpp
+ * @brief Implemention of inference pipeline
+ **/
+
+#include "hailo/inference_pipeline.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/async_thread.hpp"
+
+#include "net_flow/pipeline/vstream_internal.hpp"
+#include "network_group/network_group_internal.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+#include <sstream>
+
+
+namespace hailort
+{
+
+InferVStreams::InferVStreams(std::vector<InputVStream> &&inputs, std::vector<OutputVStream> &&outputs, bool is_multi_context,
+ bool is_scheduled, uint16_t batch_size) :
+ m_inputs(std::move(inputs)),
+ m_outputs(std::move(outputs)),
+ m_is_multi_context(is_multi_context),
+ m_is_scheduled(is_scheduled),
+ m_batch_size(batch_size)
+{
+ for (auto &input : m_inputs) {
+ if (contains(m_network_name_to_input_count, input.network_name())) {
+ ++m_network_name_to_input_count[input.network_name()];
+ } else {
+ m_network_name_to_input_count.emplace(input.network_name(), 1);
+ }
+ }
+ for (auto &output : m_outputs) {
+ if (contains(m_network_name_to_output_count, output.network_name())) {
+ ++m_network_name_to_output_count[output.network_name()];
+ } else {
+ m_network_name_to_output_count.emplace(output.network_name(), 1);
+ }
+ }
+}
+
+hailo_status InferVStreams::verify_network_inputs_and_outputs(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
+ const std::map<std::string, MemoryView>& outputs_name_mem_view_map)
+{
+ std::map<std::string, std::pair<size_t, size_t>> input_output_count_per_network;
+
+ for (const auto &input_name_to_memview : inputs_name_mem_view_map) {
+ auto input_vstream = get_input_by_name(input_name_to_memview.first);
+ CHECK_EXPECTED_AS_STATUS(input_vstream);
+ auto network_name = input_vstream->get().network_name();
+ if (contains(input_output_count_per_network, network_name)) {
+ ++input_output_count_per_network[network_name].first;
+ } else {
+ input_output_count_per_network.emplace(network_name, std::pair<size_t, size_t>(1, 0));
+ }
+ }
+ for (const auto &output_name_to_memview : outputs_name_mem_view_map) {
+ auto output_vstream = get_output_by_name(output_name_to_memview.first);
+ CHECK_EXPECTED_AS_STATUS(output_vstream);
+ auto network_name = output_vstream->get().network_name();
+ if (contains(input_output_count_per_network, network_name)) {
+ ++input_output_count_per_network[network_name].second;
+ } else {
+ input_output_count_per_network.emplace(network_name, std::pair<size_t, size_t>(0, 1));
+ }
+ }
+ CHECK(!m_is_multi_context || (input_output_count_per_network.size() == m_network_name_to_input_count.size()), HAILO_INVALID_ARGUMENT,
+ "For multi-context network groups, inference is only supported on all available networks");
+
+ for (const auto &network_to_input_output_count : input_output_count_per_network) {
+ CHECK(network_to_input_output_count.second.first == m_network_name_to_input_count[network_to_input_output_count.first],
+ HAILO_INVALID_ARGUMENT, "Not all inputs have been provided for network {}", network_to_input_output_count.first);
+ CHECK(network_to_input_output_count.second.second == m_network_name_to_output_count[network_to_input_output_count.first],
+ HAILO_INVALID_ARGUMENT, "Not all outputs have been provided for network {}", network_to_input_output_count.first);
+ }
+ return HAILO_SUCCESS;
+}
+
+static hailo_status verify_vstream_params_in_vstream_infos(const std::map<std::string, hailo_vstream_params_t> ¶ms,
+ const std::vector<hailo_vstream_info_t> &vstream_infos)
+{
+ for (const auto &name_to_param : params) {
+ const auto &name = name_to_param.first;
+ bool found = false;
+ for (const auto &vstream_info : vstream_infos) {
+ if (vstream_info.name == name) {
+ found = true;
+ break;
+ }
+ }
+ CHECK(found, HAILO_NOT_FOUND, "Could not find vstream {}", name);
+ }
+ return HAILO_SUCCESS;
+}
+
+Expected<InferVStreams> InferVStreams::create(ConfiguredNetworkGroup &net_group,
+ const std::map<std::string, hailo_vstream_params_t> &input_params,
+ const std::map<std::string, hailo_vstream_params_t> &output_params)
+{
+ auto network_infos = net_group.get_network_infos();
+ CHECK_EXPECTED(network_infos);
+
+ auto is_multi_context = net_group.is_multi_context();
+ std::map<std::string, std::pair<size_t, size_t>> input_param_count_per_network;
+ size_t total_inputs_found = 0;
+ size_t total_outputs_found = 0;
+
+ uint16_t batch_size = 0;
+ if (is_multi_context) {
+ const auto &config_params = net_group.get_config_params();
+ batch_size = config_params.batch_size;
+
+ if (HAILO_DEFAULT_BATCH_SIZE == batch_size) {
+ uint16_t network_batch_size = config_params.network_params_by_name.begin()->second.batch_size;
+ for (const auto &name_params_pair : config_params.network_params_by_name) {
+ CHECK_AS_EXPECTED(network_batch_size == name_params_pair.second.batch_size, HAILO_INVALID_ARGUMENT,
+ "Batch size of each network must be the same!");
+ }
+
+ batch_size = network_batch_size;
+ }
+ }
+
+ if (HAILO_DEFAULT_BATCH_SIZE == batch_size) {
+ batch_size = DEFAULT_ACTUAL_BATCH_SIZE;
+ }
+
+ for (const auto &network_info : network_infos.value()) {
+ auto input_vstream_infos_per_network = net_group.get_input_vstream_infos(network_info.name);
+ CHECK_EXPECTED(input_vstream_infos_per_network);
+
+ size_t input_counter = 0;
+ for (const auto &vstream_info : input_vstream_infos_per_network.value()) {
+ if (contains(input_params, std::string(vstream_info.name))) {
+ ++input_counter;
+ ++total_inputs_found;
+ }
+ }
+
+ auto output_vstream_infos_per_network = net_group.get_output_vstream_infos(network_info.name);
+ CHECK_EXPECTED(output_vstream_infos_per_network);
+
+ size_t output_counter = 0;
+ for (const auto &vstream_info : output_vstream_infos_per_network.value()) {
+ if (contains(output_params, std::string(vstream_info.name))) {
+ ++output_counter;
+ ++total_outputs_found;
+ }
+ }
+
+ if ((0 != input_counter) || (0 != output_counter)) {
+ CHECK_AS_EXPECTED(input_counter == input_vstream_infos_per_network->size(), HAILO_INVALID_ARGUMENT,
+ "Found only partial inputs for network {}", network_info.name);
+ CHECK_AS_EXPECTED(output_counter == output_vstream_infos_per_network->size(), HAILO_INVALID_ARGUMENT,
+ "Found only partial outputs for network {}", network_info.name);
+ } else {
+ CHECK_AS_EXPECTED(!is_multi_context, HAILO_INVALID_ARGUMENT,
+ "For multi-context network groups, the pipeline must be created for all available networks");
+ }
+ }
+
+ if (total_inputs_found != input_params.size()) {
+ auto all_input_vstream_infos = net_group.get_input_vstream_infos();
+ CHECK_EXPECTED(all_input_vstream_infos);
+
+ auto status = verify_vstream_params_in_vstream_infos(input_params, all_input_vstream_infos.release());
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ }
+ if (total_outputs_found != output_params.size()) {
+ auto all_output_vstream_infos = net_group.get_output_vstream_infos();
+ CHECK_EXPECTED(all_output_vstream_infos);
+
+ auto status = verify_vstream_params_in_vstream_infos(output_params, all_output_vstream_infos.release());
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ }
+
+ auto input_vstreams = VStreamsBuilder::create_input_vstreams(net_group, input_params);
+ CHECK_EXPECTED(input_vstreams);
+
+ auto output_vstreams = VStreamsBuilder::create_output_vstreams(net_group, output_params);
+ CHECK_EXPECTED(output_vstreams);
+
+ return InferVStreams(input_vstreams.release(), output_vstreams.release(), is_multi_context, net_group.is_scheduled(),
+ batch_size);
+}
+
+hailo_status InferVStreams::infer(const std::map<std::string, MemoryView>& input_data,
+ std::map<std::string, MemoryView>& output_data, size_t frames_count)
+{
+ auto status = verify_network_inputs_and_outputs(input_data, output_data);
+ CHECK_SUCCESS(status);
+
+ status = verify_memory_view_size(input_data, output_data, frames_count);
+ CHECK_SUCCESS(status);
+
+ status = verify_frames_count(frames_count);
+ CHECK_SUCCESS(status);
+
+ std::vector<AsyncThreadPtr<hailo_status>> results;
+
+ // Launch async read/writes
+ for (auto &input_name_to_data_pair : input_data) {
+ auto input_vstream_exp = get_input_by_name(input_name_to_data_pair.first);
+ CHECK_EXPECTED_AS_STATUS(input_vstream_exp);
+ auto &input_vstream = input_vstream_exp.release().get();
+ results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
+ [&input_vstream, &input_name_to_data_pair, frames_count]() -> hailo_status {
+ const auto &input_buffer = input_name_to_data_pair.second;
+ for (uint32_t i = 0; i < frames_count; i++) {
+ const size_t offset = i * input_vstream.get_frame_size();
+ auto status = input_vstream.write(MemoryView::create_const(
+ input_buffer.data() + offset,
+ input_vstream.get_frame_size()));
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__DEBUG("Input stream was aborted!");
+ return status;
+ }
+ CHECK_SUCCESS(status);
+ }
+ return HAILO_SUCCESS;
+ }
+ ));
+ }
+ for (auto &output_name_to_data_pair : output_data) {
+ auto output_vstream_exp = get_output_by_name(output_name_to_data_pair.first);
+ CHECK_EXPECTED_AS_STATUS(output_vstream_exp);
+ auto &output_vstream = output_vstream_exp.release().get();
+ results.emplace_back(std::make_unique<AsyncThread<hailo_status>>(
+ [&output_vstream, &output_name_to_data_pair, frames_count]() {
+ for (size_t i = 0; i < frames_count; i++) {
+ auto status = output_vstream.read(MemoryView(output_name_to_data_pair.second.data() + i * output_vstream.get_frame_size(), output_vstream.get_frame_size()));
+ if (HAILO_SUCCESS != status) {
+ return status;
+ }
+ }
+ return HAILO_SUCCESS;
+ }
+ ));
+ }
+
+ // Wait for all results
+ auto error_status = HAILO_SUCCESS;
+ for (auto& result : results) {
+ status = result->get();
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ continue;
+ }
+ if (HAILO_SUCCESS != status) {
+ error_status = status;
+ LOGGER__ERROR("Failed waiting for threads with status {}", error_status);
+ }
+ }
+ if (HAILO_SUCCESS != error_status) {
+ return error_status;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status InferVStreams::verify_memory_view_size(const std::map<std::string, MemoryView>& inputs_name_mem_view_map,
+ const std::map<std::string, MemoryView>& outputs_name_mem_view_map, size_t frames_count)
+{
+ for (const auto &input_name_to_memview : inputs_name_mem_view_map) {
+ auto input_vstream_exp = get_input_by_name(input_name_to_memview.first);
+ CHECK_EXPECTED_AS_STATUS(input_vstream_exp);
+ auto &input_vstream = input_vstream_exp.release().get();
+ CHECK(frames_count * input_vstream.get_frame_size() == input_name_to_memview.second.size(), HAILO_INVALID_ARGUMENT,
+ "Memory size of vstream {} does not match the frame count! (Expected {}, got {})",
+ input_vstream.name(), frames_count * input_vstream.get_frame_size(), input_name_to_memview.second.size());
+ }
+ for (const auto &output_name_to_memview : outputs_name_mem_view_map) {
+ auto output_vstream_exp = get_output_by_name(output_name_to_memview.first);
+ CHECK_EXPECTED_AS_STATUS(output_vstream_exp);
+ auto &output_vstream = output_vstream_exp.release().get();
+ CHECK(frames_count * output_vstream.get_frame_size() == output_name_to_memview.second.size(), HAILO_INVALID_ARGUMENT,
+ "Memory size of vstream {} does not match the frame count! (Expected {}, got {})",
+ output_vstream.name(), frames_count * output_vstream.get_frame_size(), output_name_to_memview.second.size());
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status InferVStreams::verify_frames_count(size_t frames_count)
+{
+ if (m_is_multi_context && !m_is_scheduled) {
+ CHECK((frames_count % m_batch_size) == 0, HAILO_INVALID_ARGUMENT,
+ "On the case of multi-context without the model scheduler, frames count must be a multiplier of the batch size! ({} % {} != 0)",
+ frames_count, m_batch_size);
+ }
+ return HAILO_SUCCESS;
+}
+
+Expected<std::reference_wrapper<InputVStream>> InferVStreams::get_input_by_name(const std::string &name)
+{
+ for (auto &input_vstream : m_inputs) {
+ if (input_vstream.name() == name) {
+ return std::ref(input_vstream);
+ }
+ }
+ return make_unexpected(HAILO_NOT_FOUND);
+}
+
+Expected<std::reference_wrapper<OutputVStream>> InferVStreams::get_output_by_name(const std::string &name)
+{
+ for (auto &ouput_vstream : m_outputs) {
+ if (ouput_vstream.name() == name) {
+ return std::ref(ouput_vstream);
+ }
+ }
+ return make_unexpected(HAILO_NOT_FOUND);
+}
+
+std::vector<std::reference_wrapper<InputVStream>> InferVStreams::get_input_vstreams()
+{
+ std::vector<std::reference_wrapper<InputVStream>> vsterams_refs;
+ for (auto &input_vstream : m_inputs) {
+ vsterams_refs.push_back(std::ref(input_vstream));
+ }
+ return vsterams_refs;
+}
+
+std::vector<std::reference_wrapper<OutputVStream>> InferVStreams::get_output_vstreams()
+{
+ std::vector<std::reference_wrapper<OutputVStream>> vsterams_refs;
+ for (auto &ouput_vstream : m_outputs) {
+ vsterams_refs.push_back(std::ref(ouput_vstream));
+ }
+ return vsterams_refs;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pipeline.cpp
+ * @brief Implementation of the pipeline
+ **/
+
+#include "common/utils.hpp"
+#include "common/runtime_statistics_internal.hpp"
+#include "common/os_utils.hpp"
+
+#include "net_flow/pipeline/pipeline.hpp"
+
+
+namespace hailort
+{
+
+PipelineBuffer::Metadata::Metadata(PipelineTimePoint start_time) :
+ m_start_time(start_time)
+{}
+
+PipelineBuffer::Metadata::Metadata() :
+ Metadata(PipelineTimePoint{})
+{}
+
+PipelineTimePoint PipelineBuffer::Metadata::get_start_time() const
+{
+ return m_start_time;
+}
+
+void PipelineBuffer::Metadata::set_start_time(PipelineTimePoint val)
+{
+ m_start_time = val;
+}
+
+PipelineBuffer::PipelineBuffer() :
+ PipelineBuffer(Type::DATA)
+{}
+
+PipelineBuffer::PipelineBuffer(Type type) :
+ m_type(type),
+ m_buffer(),
+ m_should_release_buffer(false),
+ m_pool(nullptr),
+ m_view(),
+ m_metadata()
+{}
+
+PipelineBuffer::PipelineBuffer(MemoryView view, bool should_measure) :
+ m_type(Type::DATA),
+ m_buffer(),
+ m_should_release_buffer(false),
+ m_pool(nullptr),
+ m_view(view),
+ m_metadata(Metadata(add_timestamp(should_measure)))
+{}
+
+PipelineBuffer::PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure) :
+ m_type(Type::DATA),
+ m_buffer(std::move(buffer)),
+ m_should_release_buffer(true),
+ m_pool(pool),
+ m_view(m_buffer),
+ m_metadata(Metadata(add_timestamp(should_measure)))
+{}
+
+PipelineBuffer::PipelineBuffer(PipelineBuffer &&other) :
+ m_type(other.m_type),
+ m_buffer(std::move(other.m_buffer)),
+ m_should_release_buffer(std::exchange(other.m_should_release_buffer, false)),
+ m_pool(std::move(other.m_pool)),
+ m_view(std::move(other.m_view)),
+ m_metadata(std::move(other.m_metadata))
+{}
+
+PipelineBuffer &PipelineBuffer::operator=(PipelineBuffer &&other)
+{
+ m_type = other.m_type,
+ m_buffer = std::move(other.m_buffer);
+ m_should_release_buffer = std::exchange(other.m_should_release_buffer, false);
+ m_pool = std::move(other.m_pool);
+ m_view = std::move(other.m_view);
+ m_metadata = std::move(other.m_metadata);
+ return *this;
+}
+
+PipelineBuffer::~PipelineBuffer()
+{
+ if (!m_should_release_buffer) {
+ return;
+ }
+
+ hailo_status status = m_pool->release_buffer(std::move(m_buffer));
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Releasing buffer in buffer pool failed! status = {}", status);
+ }
+}
+
+PipelineBuffer::operator bool() const
+{
+ return !m_view.empty();
+}
+
+uint8_t* PipelineBuffer::data()
+{
+ return m_view.data();
+}
+
+size_t PipelineBuffer::size() const
+{
+ return m_view.size();
+}
+
+MemoryView PipelineBuffer::as_view()
+{
+ return m_view;
+}
+
+PipelineBuffer::Type PipelineBuffer::get_type() const
+{
+ return m_type;
+}
+
+PipelineBuffer::Metadata PipelineBuffer::get_metadata() const
+{
+ return m_metadata;
+}
+
+void PipelineBuffer::set_metadata(Metadata &&val)
+{
+ m_metadata = std::move(val);
+}
+
+PipelineTimePoint PipelineBuffer::add_timestamp(bool should_measure)
+{
+ return should_measure ? std::chrono::steady_clock::now() : PipelineTimePoint{};
+}
+
+Expected<BufferPoolPtr> BufferPool::create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
+ hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags)
+{
+ AccumulatorPtr queue_size_accumulator = nullptr;
+ if ((elem_flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
+ queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
+ CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
+ }
+ const bool measure_vstream_latency = (vstream_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0;
+
+ auto free_buffers = SpscQueue<Buffer>::create(buffer_count, shutdown_event, BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT);
+ CHECK_EXPECTED(free_buffers);
+
+ for (size_t i = 0; i < buffer_count; i++) {
+ auto buffer = Buffer::create(buffer_size);
+ CHECK_EXPECTED(buffer);
+
+ hailo_status status = free_buffers->enqueue(buffer.release());
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ }
+
+ auto buffer_pool_ptr = make_shared_nothrow<BufferPool>(buffer_size, measure_vstream_latency,
+ free_buffers.release(), std::move(queue_size_accumulator));
+ CHECK_AS_EXPECTED(nullptr != buffer_pool_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return buffer_pool_ptr;
+}
+
+BufferPool::BufferPool(size_t buffer_size, bool measure_vstream_latency, SpscQueue<Buffer> &&free_buffers, AccumulatorPtr &&queue_size_accumulator) :
+ m_buffer_size(buffer_size),
+ m_measure_vstream_latency(measure_vstream_latency),
+ m_free_buffers(std::move(free_buffers)),
+ m_queue_size_accumulator(std::move(queue_size_accumulator))
+{}
+
+size_t BufferPool::buffer_size()
+{
+ return m_buffer_size;
+}
+
// Blocks up to 'timeout' for a free buffer. The returned PipelineBuffer holds
// shared ownership of this pool (shared_from_this), so the raw buffer can find
// its way back here later (see release_buffer).
Expected<PipelineBuffer> BufferPool::acquire_buffer(std::chrono::milliseconds timeout)
{
    // Sample queue occupancy before the dequeue when stats are enabled.
    if (nullptr != m_queue_size_accumulator) {
        m_queue_size_accumulator->add_data_point(static_cast<double>(m_free_buffers.size_approx()));
    }
    auto buffer = m_free_buffers.dequeue(timeout);
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
        // Shutdown is an expected way out - propagate without logging an error.
        return make_unexpected(buffer.status());
    }
    else if (HAILO_TIMEOUT == buffer.status()) {
        LOGGER__WARNING(
            "Failed to acquire buffer because the buffer pool is empty. This could be caused by uneven reading and writing speeds, with a short user-defined timeout. (timeout={}ms)",
            timeout.count());
        return make_unexpected(buffer.status());
    }
    CHECK_EXPECTED(buffer);
    return PipelineBuffer(buffer.release(), shared_from_this(), m_measure_vstream_latency);
}

AccumulatorPtr BufferPool::get_queue_size_accumulator()
{
    return m_queue_size_accumulator;
}

// If the caller supplied 'optional', validate its size against the pool's
// buffer size and use it as-is; otherwise acquire a fresh buffer from the pool.
Expected<PipelineBuffer> BufferPool::get_available_buffer(PipelineBuffer &&optional, std::chrono::milliseconds timeout)
{
    if (optional) {
        CHECK_AS_EXPECTED(optional.size() == buffer_size(), HAILO_INVALID_OPERATION,
            "Optional buffer size must be equal to pool buffer size. Optional buffer size = {}, buffer pool size = {}",
            optional.size(), buffer_size());
        return std::move(optional);
    }

    auto acquired_buffer = acquire_buffer(timeout);
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
        return make_unexpected(acquired_buffer.status());
    }
    CHECK_EXPECTED(acquired_buffer, "Failed to acquire buffer with status={}", acquired_buffer.status());
    return acquired_buffer.release();
}

// Returns a raw buffer to the free list. Serialized with a mutex because
// multiple PipelineBuffers may be released concurrently.
hailo_status BufferPool::release_buffer(Buffer &&buffer)
{
    std::unique_lock<std::mutex> lock(m_release_buffer_mutex);
    // This can be called after the shutdown event was signaled so we ignore it here
    return m_free_buffers.enqueue(std::move(buffer), true);
}
+
// Builds a DurationCollector per the stats flags, allocating only the
// accumulators that were actually requested.
Expected<DurationCollector> DurationCollector::create(hailo_pipeline_elem_stats_flags_t flags,
    uint32_t num_frames_before_collection_start)
{
    AccumulatorPtr latency_accumulator = nullptr;
    const auto measure_latency = should_measure_latency(flags);
    if (measure_latency) {
        latency_accumulator = make_shared_nothrow<FullAccumulator<double>>("latency");
        CHECK_AS_EXPECTED(nullptr != latency_accumulator, HAILO_OUT_OF_HOST_MEMORY);
    }

    AccumulatorPtr average_fps_accumulator = nullptr;
    const auto measure_average_fps = should_measure_average_fps(flags);
    if (measure_average_fps) {
        average_fps_accumulator = make_shared_nothrow<AverageFPSAccumulator<double>>("fps");
        CHECK_AS_EXPECTED(nullptr != average_fps_accumulator, HAILO_OUT_OF_HOST_MEMORY);
    }

    return DurationCollector(measure_latency, measure_average_fps, std::move(latency_accumulator),
        std::move(average_fps_accumulator), num_frames_before_collection_start);
}

DurationCollector::DurationCollector(bool measure_latency, bool measure_average_fps,
    AccumulatorPtr &&latency_accumulator, AccumulatorPtr &&average_fps_accumulator,
    uint32_t num_frames_before_collection_start) :
    m_measure_latency(measure_latency),
    m_measure_average_fps(measure_average_fps),
    // Any enabled measurement turns the collector on.
    m_measure(m_measure_latency || m_measure_average_fps),
    m_latency_accumulator(std::move(latency_accumulator)),
    m_average_fps_accumulator(std::move(average_fps_accumulator)),
    m_start(),
    m_count(0),
    m_num_frames_before_collection_start(num_frames_before_collection_start)
{}

// Marks the start of a measured interval. The first
// m_num_frames_before_collection_start frames are skipped as warm-up.
void DurationCollector::start_measurement()
{
    if (!m_measure) {
        return;
    }

    m_count++;
    if (m_count < m_num_frames_before_collection_start) {
        return;
    }

    m_start = std::chrono::steady_clock::now();
}

// Feeds the elapsed time since start_measurement() (seconds, as double) into
// the enabled accumulators. No-op during the warm-up frames.
void DurationCollector::complete_measurement()
{
    if ((!m_measure) || (m_count < m_num_frames_before_collection_start)) {
        return;
    }

    const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
        std::chrono::steady_clock::now() - m_start).count();
    if (m_measure_latency) {
        m_latency_accumulator->add_data_point(duration_sec);
    }

    if (m_measure_average_fps) {
        m_average_fps_accumulator->add_data_point(duration_sec);
    }
}

// May return nullptr when the corresponding measurement was not requested.
AccumulatorPtr DurationCollector::get_latency_accumulator()
{
    return m_latency_accumulator;
}

// May return nullptr when the corresponding measurement was not requested.
AccumulatorPtr DurationCollector::get_average_fps_accumulator()
{
    return m_average_fps_accumulator;
}

bool DurationCollector::should_measure_latency(hailo_pipeline_elem_stats_flags_t flags)
{
    return (flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_LATENCY) != 0;
}

bool DurationCollector::should_measure_average_fps(hailo_pipeline_elem_stats_flags_t flags)
{
    return (flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_FPS) != 0;
}
+
// Every pipeline object (element, pad) carries an immutable name for logging
// and diagnostics.
PipelineObject::PipelineObject(const std::string &name) : m_name(name)
{}

const std::string &PipelineObject::name() const
{
    return m_name;
}
+
+std::string PipelineObject::create_element_name(const std::string &element_name, const std::string &stream_name, uint8_t stream_index)
+{
+ std::stringstream name;
+ name << element_name << static_cast<uint32_t>(stream_index) << "_" << stream_name;
+ return name.str();
+}
+
// Shared-ptr overload: validates non-null operands and forwards to the
// reference overload below.
hailo_status PipelinePad::link_pads(std::shared_ptr<PipelineElement> left, std::shared_ptr<PipelineElement> right,
    uint32_t left_source_index, uint32_t right_sink_index)
{
    CHECK_ARG_NOT_NULL(left);
    CHECK_ARG_NOT_NULL(right);
    return link_pads(*left, *right, left_source_index, right_sink_index);
}

// Wires left's source pad to right's sink pad (doubly linked next/prev), after
// bounds-checking both pad indices.
hailo_status PipelinePad::link_pads(PipelineElement &left, PipelineElement &right, uint32_t left_source_index,
    uint32_t right_sink_index)
{
    CHECK(left_source_index < left.sources().size(), HAILO_INVALID_ARGUMENT,
        "Cannot link source pad #{} for PipelineElement '{}', it has only {} source pads.",
        left_source_index, left.name(), left.sources().size());
    CHECK(right_sink_index < right.sinks().size(), HAILO_INVALID_ARGUMENT,
        "Cannot link sink pad #{} for PipelineElement '{}', it has only {} sink pads.",
        right_sink_index, right.name(), right.sinks().size());
    auto &left_source_pad = left.sources()[left_source_index];
    auto &right_sink_pad = right.sinks()[right_sink_index];

    left_source_pad.set_next(&right_sink_pad);
    right_sink_pad.set_prev(&left_source_pad);

    return HAILO_SUCCESS;
}

// Initial value of the counter
uint32_t PipelinePad::index = 0;
// Builds a unique pad name "<element>(<sink|source><N>)" from the global
// monotonically-increasing counter above.
// NOTE(review): 'index' is a plain (non-atomic) static - this assumes pads are
// constructed from a single thread; confirm before building pipelines
// concurrently.
std::string PipelinePad::create_pad_name(const std::string &element_name, Type pad_type)
{
    std::stringstream string_stream;
    const auto pad_type_name = (pad_type == Type::SINK) ? "sink" : "source";
    string_stream << element_name << "(" << pad_type_name << index++ << ")";
    return string_stream.str();
}

PipelinePad::PipelinePad(PipelineElement &element, const std::string &element_name, Type pad_type) :
    PipelineObject(create_pad_name(element_name, pad_type)),
    m_element(element),
    m_next(nullptr),
    m_prev(nullptr),
    m_push_complete_callback(nullptr),
    m_pull_complete_callback(nullptr)
{}
+
// The lifecycle methods below simply forward to the owning element; pads act
// as the uniform hand-off point between linked elements.
hailo_status PipelinePad::activate()
{
    return m_element.activate();
}

hailo_status PipelinePad::deactivate()
{
    return m_element.deactivate();
}

hailo_status PipelinePad::post_deactivate()
{
    return m_element.post_deactivate();
}

hailo_status PipelinePad::clear()
{
    return m_element.clear();
}

hailo_status PipelinePad::flush()
{
    return m_element.flush();
}

hailo_status PipelinePad::abort()
{
    return m_element.abort();
}

hailo_status PipelinePad::wait_for_finish()
{
    return m_element.wait_for_finish();
}

hailo_status PipelinePad::resume()
{
    return m_element.resume();
}

// Pushes the buffer into the owning element. When a completion callback is
// set, the metadata is copied out *before* the buffer is moved away, so the
// callback can still observe it. Note the callback runs even if the push
// failed.
hailo_status PipelinePad::run_push(PipelineBuffer &&buffer)
{
    if (m_push_complete_callback) {
        auto metadata = buffer.get_metadata();
        const auto status = m_element.run_push(std::move(buffer));
        m_push_complete_callback(metadata);
        return status;
    }

    return m_element.run_push(std::move(buffer));
}

// Pulls a buffer from the owning element; the completion callback (if set)
// fires only on success.
Expected<PipelineBuffer> PipelinePad::run_pull(PipelineBuffer &&optional)
{
    auto result = m_element.run_pull(std::move(optional), *this);
    if (m_pull_complete_callback && result) {
        m_pull_complete_callback(result->get_metadata());
    }

    return result;
}

void PipelinePad::set_push_complete_callback(PushCompleteCallback push_complete_callback)
{
    m_push_complete_callback = push_complete_callback;
}

void PipelinePad::set_pull_complete_callback(PullCompleteCallback pull_complete_callback)
{
    m_pull_complete_callback = pull_complete_callback;
}

// next/prev form the link between a source pad and the sink pad of the
// downstream element (see link_pads).
void PipelinePad::set_next(PipelinePad *next)
{
    m_next = next;
}

void PipelinePad::set_prev(PipelinePad *prev)
{
    m_prev = prev;
}

PipelinePad *PipelinePad::next()
{
    return m_next;
}

PipelinePad *PipelinePad::prev()
{
    return m_prev;
}

PipelineElement &PipelinePad::element()
{
    return m_element;
}

const PipelinePad *PipelinePad::next() const
{
    return m_next;
}

const PipelinePad *PipelinePad::prev() const
{
    return m_prev;
}

const PipelineElement &PipelinePad::element() const
{
    return m_element;
}
+
// An element with exactly one source (output) pad.
SourceElement::SourceElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
{
    m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
}

PipelinePad &SourceElement::source()
{
    return m_sources[0];
}

// An element with exactly one sink (input) pad.
SinkElement::SinkElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
{
    m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
}

PipelinePad &SinkElement::sink()
{
    return m_sinks[0];
}

// An element with one sink and one source pad (pass-through position in the
// pipeline).
IntermediateElement::IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
{
    m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
    m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
}

// Lifecycle operations propagate via next_pad() (direction is defined by the
// concrete subclass).
std::vector<PipelinePad*> IntermediateElement::execution_pads()
{
    std::vector<PipelinePad*> result{&next_pad()};
    return result;
}
+
// Base class for all pipeline elements. 'pipeline_status' is shared across the
// whole pipeline so any element can publish a failure.
PipelineElement::PipelineElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
    PipelineObject(name),
    m_duration_collector(std::move(duration_collector)),
    m_pipeline_status(std::move(pipeline_status)),
    m_sinks(),
    m_sources()
{}

AccumulatorPtr PipelineElement::get_fps_accumulator()
{
    return m_duration_collector.get_average_fps_accumulator();
}

AccumulatorPtr PipelineElement::get_latency_accumulator()
{
    return m_duration_collector.get_latency_accumulator();
}

// Default: no queue, hence no queue-size accumulators (queue elements
// override this).
std::vector<AccumulatorPtr> PipelineElement::get_queue_size_accumulators()
{
    return std::vector<AccumulatorPtr>();
}

std::vector<PipelinePad> &PipelineElement::sinks()
{
    return m_sinks;
}

std::vector<PipelinePad> &PipelineElement::sources()
{
    return m_sources;
}

const std::vector<PipelinePad> &PipelineElement::sinks() const
{
    return m_sinks;
}

const std::vector<PipelinePad> &PipelineElement::sources() const
{
    return m_sources;
}
+
+std::string PipelineElement::description() const
+{
+ std::stringstream element_description;
+ element_description << "(" << this->name() << ")";
+ return element_description.str();
+}
+
// The public lifecycle API forwards to virtual execute_* hooks; the default
// hooks (below) propagate each operation across this element's execution pads.
hailo_status PipelineElement::activate()
{
    return execute_activate();
}

hailo_status PipelineElement::deactivate()
{
    return execute_deactivate();
}

hailo_status PipelineElement::post_deactivate()
{
    return execute_post_deactivate();
}

hailo_status PipelineElement::clear()
{
    return execute_clear();
}

hailo_status PipelineElement::flush()
{
    return execute_flush();
}

hailo_status PipelineElement::abort()
{
    return execute_abort();
}

hailo_status PipelineElement::resume()
{
    return execute_resume();
}

hailo_status PipelineElement::wait_for_finish()
{
    return execute_wait_for_finish();
}

hailo_status PipelineElement::execute_activate()
{
    return execute([&](auto *pad){ return pad->activate(); });
}

hailo_status PipelineElement::execute_deactivate()
{
    return execute([&](auto *pad){ return pad->deactivate(); });
}

hailo_status PipelineElement::execute_post_deactivate()
{
    return execute([&](auto *pad){ return pad->post_deactivate(); });
}

hailo_status PipelineElement::execute_clear()
{
    return execute([&](auto *pad){ return pad->clear(); });
}

hailo_status PipelineElement::execute_flush()
{
    return execute([&](auto *pad){ return pad->flush(); });
}

hailo_status PipelineElement::execute_abort()
{
    return execute([&](auto *pad){ return pad->abort(); });
}

hailo_status PipelineElement::execute_resume()
{
    return execute([&](auto *pad){ return pad->resume(); });
}

hailo_status PipelineElement::execute_wait_for_finish()
{
    return execute([&](auto *pad){ return pad->wait_for_finish(); });
}

// Applies 'func' to each execution pad, stopping on the first failure.
hailo_status PipelineElement::execute(std::function<hailo_status(PipelinePad*)> func)
{
    for (auto pad : execution_pads()) {
        auto status = func(pad);
        CHECK_SUCCESS(status);
    }
    return HAILO_SUCCESS;
}
+
// A source element propagates lifecycle operations through its single source pad.
std::vector<PipelinePad*> SourceElement::execution_pads()
{
    std::vector<PipelinePad*> result{&source()};
    return result;
}

// A sink element propagates lifecycle operations through its single sink pad.
std::vector<PipelinePad*> SinkElement::execution_pads()
{
    std::vector<PipelinePad*> result{&sink()};
    return result;
}
+
// An intermediate element that applies a transformation (the virtual action())
// to every buffer passing through it.
FilterElement::FilterElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
    IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status))
{}

// Push mode: transform the incoming buffer, then push the result downstream.
// Shutdown/abort statuses are propagated quietly (info log only).
hailo_status FilterElement::run_push(PipelineBuffer &&buffer)
{
    auto output = action(std::move(buffer), PipelineBuffer());
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
        return output.status();
    }
    CHECK_EXPECTED_AS_STATUS(output);

    hailo_status status = next_pad().run_push(output.release());
    if (status == HAILO_SHUTDOWN_EVENT_SIGNALED) {
        LOGGER__INFO("run_push of {} was shutdown!", name());
        return status;
    }
    if (status == HAILO_STREAM_ABORTED_BY_USER) {
        LOGGER__INFO("run_push of {} was aborted!", name());
        return status;
    }
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}

// Pull mode: pull from upstream, then transform. 'optional' is forwarded to
// action() as the second argument (a caller-provided buffer).
Expected<PipelineBuffer> FilterElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
{
    auto buffer = next_pad().run_pull();
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
        LOGGER__INFO("run_pull in FilterElement was shutdown!");
        return make_unexpected(buffer.status());
    }
    CHECK_EXPECTED(buffer);
    return action(buffer.release(), std::move(optional));
}
+
// Helper for the queue-element factories: creates the underlying SPSC queue.
Expected<SpscQueue<PipelineBuffer>> BaseQueueElement::create_queue(size_t queue_size, EventPtr shutdown_event)
{
    auto queue = SpscQueue<PipelineBuffer>::create(queue_size, shutdown_event);
    CHECK_EXPECTED(queue);

    return queue.release();
}

// Base for queue elements that decouple producer and consumer with a worker
// thread. 'activation_event' wakes the worker; 'deactivation_event' signals
// back that the worker finished its run (see start_thread()).
BaseQueueElement::BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
    AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
    Event &&activation_event, Event &&deactivation_event) :
    IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status)),
    m_queue(std::move(queue)),
    m_shutdown_event(shutdown_event),
    m_timeout(timeout),
    m_is_thread_running(true),
    m_activation_event(std::move(activation_event)),
    m_deactivation_event(std::move(deactivation_event)),
    m_queue_size_accumulator(std::move(queue_size_accumulator)),
    m_is_run_in_thread_running(false)
{}

BaseQueueElement::~BaseQueueElement()
{
    LOGGER__INFO("Queue element {} has {} frames in his Queue on destruction", name(), m_queue.size_approx());
}
+
// Spawns the worker thread. The worker loops: wait for activation, run the
// subclass's run_in_thread() (bracketing it with m_is_run_in_thread_running so
// execute_wait_for_finish() can observe it), and on failure publish the error,
// shut the pipeline down and re-arm for the next activation.
void BaseQueueElement::start_thread()
{
    m_thread = std::thread([this] () {
        OsUtils::set_current_thread_name(thread_name());
        while (m_is_thread_running.load()) {
            auto status = m_activation_event.wait(INIFINITE_TIMEOUT());

            // stop_thread() clears the flag and then signals the activation
            // event, so re-check after every wakeup.
            if (!m_is_thread_running) {
                LOGGER__INFO("Thread in element {} is not running anymore, exiting..", this->name());
                break;
            }
            if (HAILO_SUCCESS == status) {
                {
                    std::unique_lock<std::mutex> lock(m_mutex);
                    m_is_run_in_thread_running = true;
                }
                m_cv.notify_all();

                status = run_in_thread();

                {
                    std::unique_lock<std::mutex> lock(m_mutex);
                    m_is_run_in_thread_running = false;
                }
                m_cv.notify_all();
            }

            if (HAILO_SUCCESS != status) {
                if (HAILO_SHUTDOWN_EVENT_SIGNALED != status) {
                    // We do not want to log error for HAILO_STREAM_ABORTED_BY_USER
                    if (HAILO_STREAM_ABORTED_BY_USER != status) {
                        LOGGER__ERROR("Queue element {} run in thread function failed! status = {}", this->name(), status);
                    }

                    // Store the real error in pipeline_status
                    m_pipeline_status->store(status);

                    // Signal other threads to stop
                    hailo_status shutdown_status = m_shutdown_event->signal();
                    if (HAILO_SUCCESS != shutdown_status) {
                        LOGGER__CRITICAL("Failed shutting down queue with status {}", shutdown_status);
                    }
                }
                //Thread has done its execution. Mark to the thread to wait for activation again
                hailo_status event_status = m_activation_event.reset();
                if (HAILO_SUCCESS != event_status) {
                    LOGGER__CRITICAL("Failed reset activation event of element {}, with status {}", this->name(), event_status);
                }

                // Mark to deactivation function that the thread is done
                event_status = m_deactivation_event.signal();
                if (HAILO_SUCCESS != event_status) {
                    LOGGER__CRITICAL("Failed signaling deactivation event of element {}, with status {}", this->name(), event_status);
                }
            }
        }
    });
}
+
// Stops the worker: signal shutdown (unblocks queue ops), clear the running
// flag, wake the worker in case it is parked on the activation event, join.
void BaseQueueElement::stop_thread()
{
    m_shutdown_event->signal();

    // Mark thread as not running, then wake it in case it is waiting on m_activation_event
    m_is_thread_running = false;
    m_activation_event.signal();

    if (m_thread.joinable()) {
        m_thread.join();
    }
}

std::vector<AccumulatorPtr> BaseQueueElement::get_queue_size_accumulators()
{
    if (nullptr == m_queue_size_accumulator) {
        return std::vector<AccumulatorPtr>();
    }
    return {m_queue_size_accumulator};
}

// Activates the rest of the pipeline first, then wakes the worker thread.
hailo_status BaseQueueElement::execute_activate()
{
    hailo_status status = PipelineElement::execute_activate();
    CHECK_SUCCESS(status);

    status = m_activation_event.signal();
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}

// Waits for the worker to acknowledge deactivation (deactivation_event is
// signaled at the end of the worker's failure/teardown path), re-arms the
// event, then continues propagating post_deactivate. Wait/reset failures are
// logged but do not stop the propagation.
hailo_status BaseQueueElement::execute_post_deactivate()
{
    hailo_status status = m_deactivation_event.wait(INIFINITE_TIMEOUT());
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("Failed to post_deactivate() in {} with status {}", name(), status);
    }

    status = m_deactivation_event.reset();
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("Failed to reset of deactivation event in {} with status {}", name(), status);
    }

    return PipelineElement::execute_post_deactivate();
}
+
+hailo_status BaseQueueElement::execute_clear()
+{
+ auto status = PipelineElement::execute_clear();
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to clear() in {} with status {}", name(), status);
+ }
+
+ auto queue_status = m_queue.clear();
+ CHECK_SUCCESS(queue_status, "Failed to clear() queue in {} with status {}", name(), status);
+
+ return status;
+}
+
// Blocks until the worker's run_in_thread() is not executing (flag toggled in
// start_thread's loop, guarded by m_mutex/m_cv).
hailo_status BaseQueueElement::execute_wait_for_finish()
{
    std::unique_lock<std::mutex> lock(m_mutex);
    m_cv.wait(lock, [this] () {
        return !m_is_run_in_thread_running;
    });
    return HAILO_SUCCESS;
}

// Abort for push queues: publish HAILO_STREAM_ABORTED_BY_USER as the pipeline
// status, propagate abort, and wake the worker so it observes the new state.
// NOTE(review): this *resets* the shutdown event (mirroring execute_resume)
// rather than signaling it - confirm that rejection is meant to happen via
// pipeline status checks in run_push rather than via shutdown.
hailo_status PushQueueElement::execute_abort()
{
    auto status = m_shutdown_event->reset();
    CHECK_SUCCESS(status);
    m_pipeline_status->store(HAILO_STREAM_ABORTED_BY_USER);
    status = PipelineElement::execute_abort();
    CHECK_SUCCESS(status);
    return m_activation_event.signal();
}

// Resume: clear any pending shutdown, reset the pipeline status to success,
// propagate resume, and wake the worker thread.
hailo_status BaseQueueElement::execute_resume()
{
    auto status = m_shutdown_event->reset();
    CHECK_SUCCESS(status);
    m_pipeline_status->store(HAILO_SUCCESS);
    status = PipelineElement::execute_resume();
    CHECK_SUCCESS(status);
    return m_activation_event.signal();
}
+
// Sets the enqueue/dequeue timeout used by run_push/run_pull.
hailo_status BaseQueueElement::set_timeout(std::chrono::milliseconds timeout)
{
    m_timeout = timeout;
    return HAILO_SUCCESS;
}

// "(<name> | timeout: Ns)"; the timeout part is omitted for infinite timeouts.
std::string BaseQueueElement::description() const
{
    std::stringstream element_description;

    element_description << "(" << this->name();
    if (HAILO_INFINITE != this->m_timeout.count()) {
        element_description << " | timeout: " << std::chrono::duration_cast<std::chrono::seconds>(this->m_timeout).count() << "s";
    }
    element_description << ")";

    return element_description.str();
}

// Reads the shared pipeline status, mapping user-initiated aborts to success.
hailo_status BaseQueueElement::pipeline_status()
{
    auto status = m_pipeline_status->load();

    // We treat HAILO_STREAM_ABORTED_BY_USER as success because it is caused by user action (aborting streams)
    if (HAILO_STREAM_ABORTED_BY_USER == status) {
        return HAILO_SUCCESS;
    }
    return status;
}
+
// Factory: builds the queue, the activation/deactivation events, the (stub)
// duration collector and the optional queue-size accumulator, then constructs
// the element (whose constructor starts the worker thread).
Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
    size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
{
    auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
    CHECK_EXPECTED(queue);

    auto activation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(activation_event);

    auto deactivation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(deactivation_event);

    // TODO: Support fps/latency collection for queue elems (HRT-7711)
    auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
    CHECK_EXPECTED(duration_collector);

    AccumulatorPtr queue_size_accumulator = nullptr;
    if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
        queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
        CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
    }

    auto queue_ptr = make_shared_nothrow<PushQueueElement>(queue.release(), shutdown_event, name, timeout,
        duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
        activation_event.release(), deactivation_event.release());
    CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PushQueueElement {} failed!", name);

    LOGGER__INFO("Created {}", queue_ptr->name());

    return queue_ptr;
}

// Convenience overload: extracts timeout/queue size/stats flags from the
// vstream params.
Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
{
    return PushQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
}

PushQueueElement::PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
    AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
    Event &&activation_event, Event &&deactivation_event) :
    BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
        std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event))
{
    // The worker thread is started by the fully-constructed subclass, not the base.
    start_thread();
}

PushQueueElement::~PushQueueElement()
{
    stop_thread();
}
+
+hailo_status PushQueueElement::run_push(PipelineBuffer &&buffer)
+{
+ // TODO: Support fps/latency collection for queue elems (HRT-7711)
+ if (nullptr != m_queue_size_accumulator) {
+ m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
+ }
+ auto status = m_pipeline_status->load();
+ if (status == HAILO_STREAM_ABORTED_BY_USER) {
+ LOGGER__INFO("run_push of {} was aborted!", name());
+ return status;
+ }
+ CHECK_SUCCESS(m_pipeline_status->load());
+ status = m_queue.enqueue(std::move(buffer), m_timeout);
+ if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+ auto queue_thread_status = pipeline_status();
+ CHECK_SUCCESS(queue_thread_status,
+ "Shutdown event was signaled in enqueue of queue element {} because thread has failed with status={}!", name(),
+ queue_thread_status);
+ LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
+ return HAILO_SHUTDOWN_EVENT_SIGNALED;
+ }
+ CHECK_SUCCESS(status);
+ return HAILO_SUCCESS;
+}
+
// A push queue only supports the push direction.
Expected<PipelineBuffer> PushQueueElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
{
    return make_unexpected(HAILO_INVALID_OPERATION);
}

// Deactivation is signaled to the worker by enqueueing a DEACTIVATE sentinel
// buffer (handled in run_in_thread below).
hailo_status PushQueueElement::execute_deactivate()
{
    // Mark to the threads that deactivate() was called.
    hailo_status status = m_queue.enqueue(PipelineBuffer(PipelineBuffer::Type::DEACTIVATE));
    if (HAILO_SUCCESS != status) {
        // We want to deactivate source even if enqueue failed
        auto deactivation_status = PipelineElement::execute_deactivate();
        CHECK_SUCCESS(deactivation_status);
        if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_SHUTDOWN_EVENT_SIGNALED == status)) {
            LOGGER__INFO("enqueue() in element {} was aborted, got status = {}", name(), status);
        }
        else {
            LOGGER__ERROR("enqueue() in element {} failed, got status = {}", name(), status);
            return status;
        }
    }

    return HAILO_SUCCESS;
}

PipelinePad &PushQueueElement::next_pad()
{
    // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
    return *m_sources[0].next();
}

// Worker body: dequeue one buffer and push it downstream. A DEACTIVATE
// sentinel triggers shutdown signaling and downstream deactivation.
hailo_status PushQueueElement::run_in_thread()
{
    auto buffer = m_queue.dequeue(INIFINITE_TIMEOUT());
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
        LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
        return HAILO_SHUTDOWN_EVENT_SIGNALED;
    }
    CHECK_EXPECTED_AS_STATUS(buffer);

    // Return if deactivated
    if (PipelineBuffer::Type::DEACTIVATE == buffer->get_type()) {
        hailo_status status = m_shutdown_event->signal();
        CHECK_SUCCESS(status);

        status = next_pad().deactivate();
        if (HAILO_SUCCESS != status) {
            LOGGER__ERROR("Deactivate of source in {} has failed with status {}", name(), status);
        }

        return HAILO_SHUTDOWN_EVENT_SIGNALED;
    }

    hailo_status status = next_pad().run_push(buffer.release());
    if (HAILO_STREAM_ABORTED_BY_USER == status) {
        LOGGER__INFO("run_push of {} was aborted!", name());
        return status;
    }
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
+
// Factory: mirrors PushQueueElement::create - queue, events, stub duration
// collector, optional queue-size accumulator, then construct (which starts the
// worker thread).
Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
    size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
{
    auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
    CHECK_EXPECTED(queue);

    auto activation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(activation_event);

    auto deactivation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(deactivation_event);

    // TODO: Support fps/latency collection for queue elems (HRT-7711)
    auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
    CHECK_EXPECTED(duration_collector);

    AccumulatorPtr queue_size_accumulator = nullptr;
    if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
        queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
        CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
    }

    auto queue_ptr = make_shared_nothrow<PullQueueElement>(queue.release(), shutdown_event, name, timeout,
        duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
        activation_event.release(), deactivation_event.release());
    CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PullQueueElement {} failed!", name);

    LOGGER__INFO("Created {}", queue_ptr->name());

    return queue_ptr;
}
// Convenience overload: extracts timeout/queue size/stats flags from the
// vstream params.
Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
{
    return PullQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
}

PullQueueElement::PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
    AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
    Event &&activation_event, Event &&deactivation_event) :
    BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
        std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event))
{
    // The worker thread is started by the fully-constructed subclass, not the base.
    start_thread();
}

PullQueueElement::~PullQueueElement()
{
    stop_thread();
}

// A pull queue only supports the pull direction.
hailo_status PullQueueElement::run_push(PipelineBuffer &&/*buffer*/)
{
    return HAILO_INVALID_OPERATION;
}
+
// Dequeues a buffer that the worker thread pre-pulled from upstream. An
// 'optional' caller buffer is rejected here (the queue pre-fills its own).
Expected<PipelineBuffer> PullQueueElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*sink*/)
{
    // TODO: Support fps/latency collection for queue elems (HRT-7711)
    CHECK_AS_EXPECTED(!optional, HAILO_INVALID_ARGUMENT, "Optional buffer is not allowed in queue element!");

    // Sample queue occupancy before the dequeue when stats are enabled.
    if (nullptr != m_queue_size_accumulator) {
        m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
    }
    auto output = m_queue.dequeue(m_timeout);
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
        // Distinguish a real worker failure (reported via pipeline status)
        // from an ordinary shutdown.
        auto queue_thread_status = pipeline_status();
        CHECK_SUCCESS_AS_EXPECTED(queue_thread_status,
            "Shutdown event was signaled in dequeue of queue element {} because thread has failed with status={}!", name(),
            queue_thread_status);
        LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
    }
    CHECK_EXPECTED(output);

    return output;
}

// Deactivate the rest of the pipeline, then signal shutdown so the worker's
// blocking queue operations unblock. Both statuses are checked, but only after
// both actions were attempted.
hailo_status PullQueueElement::execute_deactivate()
{
    hailo_status status = PipelineElement::execute_deactivate();
    auto shutdown_event_status = m_shutdown_event->signal();
    CHECK_SUCCESS(status);
    CHECK_SUCCESS(shutdown_event_status);

    return HAILO_SUCCESS;
}

PipelinePad &PullQueueElement::next_pad()
{
    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
    return *m_sinks[0].prev();
}

// Worker body: pull one buffer from upstream and enqueue it for run_pull.
// Shutdown/abort/not-activated statuses are propagated quietly.
hailo_status PullQueueElement::run_in_thread()
{
    auto buffer = next_pad().run_pull();
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
        LOGGER__INFO("Shutdown event was signaled in run_pull of queue element {}!", name());
        return HAILO_SHUTDOWN_EVENT_SIGNALED;
    }
    if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
        LOGGER__INFO("run_pull of queue element {} was aborted!", name());
        return HAILO_STREAM_ABORTED_BY_USER;
    }
    if (HAILO_NETWORK_GROUP_NOT_ACTIVATED == buffer.status()) {
        LOGGER__INFO("run_pull of queue element {} was called before network_group is activated!", name());
        return HAILO_NETWORK_GROUP_NOT_ACTIVATED;
    }
    CHECK_EXPECTED_AS_STATUS(buffer);

    hailo_status status = m_queue.enqueue(buffer.release(), INIFINITE_TIMEOUT());
    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
        LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
        return HAILO_SHUTDOWN_EVENT_SIGNALED;
    }
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
+
// Factory for the user-buffer queue element: two single-slot queues (pending
// user buffers in, filled buffers out) plus the usual events/stats plumbing.
Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
    hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
{
    auto pending_buffer_queue = BaseQueueElement::create_queue(1, shutdown_event);
    CHECK_EXPECTED(pending_buffer_queue);

    auto full_buffer_queue = BaseQueueElement::create_queue(1, shutdown_event);
    CHECK_EXPECTED(full_buffer_queue);

    auto activation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(activation_event);

    auto deactivation_event = Event::create(Event::State::not_signalled);
    CHECK_EXPECTED(deactivation_event);

    // TODO: Support fps/latency collection for queue elems (HRT-7711)
    auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
    CHECK_EXPECTED(duration_collector);

    AccumulatorPtr queue_size_accumulator = nullptr;
    if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
        queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
        CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
    }

    auto queue_ptr = make_shared_nothrow<UserBufferQueueElement>(pending_buffer_queue.release(),
        full_buffer_queue.release(), shutdown_event, name, timeout, duration_collector.release(),
        std::move(queue_size_accumulator), std::move(pipeline_status), activation_event.release(),
        deactivation_event.release());
    CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating UserBufferQueueElement {} failed!", name);

    LOGGER__INFO("Created {}", queue_ptr->name());

    return queue_ptr;
}
+
+Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
+    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // Convenience overload: unpack the relevant vstream params and delegate.
+    const auto timeout = std::chrono::milliseconds(vstream_params.timeout_ms);
+    const auto flags = vstream_params.pipeline_elements_stats_flags;
+    return UserBufferQueueElement::create(name, timeout, flags, shutdown_event, pipeline_status);
+}
+
+// Two-queue design: the inherited m_queue holds user buffers waiting to be
+// filled by the pipeline; m_full_buffer_queue holds the same buffers once
+// run_in_thread() has filled them, ready to be returned to the caller.
+UserBufferQueueElement::UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue,
+    EventPtr shutdown_event, const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    Event &&activation_event, Event &&deactivation_event) :
+    PullQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector),
+        std::move(queue_size_accumulator), std::move(pipeline_status), std::move(activation_event),
+        std::move(deactivation_event)),
+    m_full_buffer_queue(std::move(full_buffer_queue))
+{}
+
+// Hands the user-provided buffer ('optional') to the worker thread and blocks
+// until the filled buffer comes back on m_full_buffer_queue. Returns
+// HAILO_SHUTDOWN_EVENT_SIGNALED on teardown and HAILO_TIMEOUT if the worker
+// does not produce a buffer within m_timeout.
+Expected<PipelineBuffer> UserBufferQueueElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
+{
+    // TODO: Support fps/latency collection for queue elems (HRT-7711)
+    CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be valid in {}!", name());
+
+    // Capture the user buffer's address BEFORE moving 'optional' into the queue.
+    // The previous code read optional.data() after std::move(optional) (a
+    // use-after-move: the moved-from buffer's view is unspecified), which made
+    // the sanity check below meaningless.
+    const uint8_t *const user_buffer_data = optional.data();
+
+    hailo_status status = m_queue.enqueue(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+        LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
+        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    if (nullptr != m_queue_size_accumulator) {
+        m_queue_size_accumulator->add_data_point(static_cast<double>(m_full_buffer_queue.size_approx()));
+    }
+    auto output = m_full_buffer_queue.dequeue(m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
+        LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
+        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != output.status(), HAILO_TIMEOUT, "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
+    CHECK_EXPECTED(output);
+
+    // The dequeued buffer must wrap the exact memory the user handed us.
+    CHECK_AS_EXPECTED(output->data() == user_buffer_data, HAILO_INTERNAL_FAILURE, "The buffer received in {} was not the same as the user buffer!", name());
+    return output;
+}
+
+hailo_status UserBufferQueueElement::execute_clear()
+{
+    // Clear the rest of the pipeline first, then drain both internal queues.
+    // Every failure is logged; the last failing status is the one reported.
+    auto result = PipelineElement::execute_clear();
+    if (HAILO_SUCCESS != result) {
+        LOGGER__ERROR("Failed to clear() in {} with status {}", name(), result);
+    }
+
+    for (auto *queue : { &m_full_buffer_queue, &m_queue }) {
+        const auto clear_status = queue->clear();
+        if (HAILO_SUCCESS != clear_status) {
+            LOGGER__ERROR("Failed to clear() in {} with status {}", name(), clear_status);
+            result = clear_status;
+        }
+    }
+
+    return result;
+}
+
+// Worker-thread iteration: take the next user buffer from m_queue, let the
+// upstream pipeline write into it (run_pull with the user buffer as the
+// "optional" output), then publish the filled buffer on m_full_buffer_queue.
+// Shutdown/abort statuses end the iteration quietly (logged at INFO).
+hailo_status UserBufferQueueElement::run_in_thread()
+{
+    auto optional = m_queue.dequeue(INIFINITE_TIMEOUT());
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == optional.status()) {
+        LOGGER__INFO("Shutdown event was signaled in dequeue of {}!", name());
+        return HAILO_SHUTDOWN_EVENT_SIGNALED;
+    }
+    CHECK_EXPECTED_AS_STATUS(optional);
+
+    auto buffer = next_pad().run_pull(optional.release());
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        LOGGER__INFO("Shutdown event was signaled in run_pull of {}!", name());
+        return HAILO_SHUTDOWN_EVENT_SIGNALED;
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
+        LOGGER__INFO("run_pull of {} was aborted!", name());
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_EXPECTED_AS_STATUS(buffer);
+
+    hailo_status status = m_full_buffer_queue.enqueue(buffer.release(), INIFINITE_TIMEOUT());
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+        LOGGER__INFO("Shutdown event was signaled in enqueue of {}!", name());
+        return HAILO_SHUTDOWN_EVENT_SIGNALED;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+BaseMuxElement::BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_timeout(timeout)
+{
+    // A mux merges several streams: single source pad, sink_count sink pads.
+    m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
+    m_sinks.reserve(sink_count);
+    size_t remaining_sinks = sink_count;
+    while (remaining_sinks-- > 0) {
+        m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
+    }
+}
+
+std::vector<PipelinePad*> BaseMuxElement::execution_pads()
+{
+    // Pull pipelines execute upstream: collect the pad feeding each of our sinks.
+    std::vector<PipelinePad*> upstream_pads;
+    upstream_pads.reserve(m_sinks.size());
+    for (size_t i = 0; i < m_sinks.size(); i++) {
+        upstream_pads.push_back(m_sinks[i].prev());
+    }
+    return upstream_pads;
+}
+
+// Mux elements participate only in pull-mode pipelines; pushing is unsupported.
+hailo_status BaseMuxElement::run_push(PipelineBuffer &&/*buffer*/)
+{
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// Pulls one buffer from every sink's upstream pad (in sink order), then merges
+// them via the subclass's action(). 'optional' is forwarded to action() as a
+// pre-allocated output buffer.
+Expected<PipelineBuffer> BaseMuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
+{
+    std::vector<PipelineBuffer> inputs;
+    inputs.reserve(m_sinks.size());
+    for (auto &sink : m_sinks) {
+        auto buffer = sink.prev()->run_pull();
+        // Shutdown is an expected termination - propagate without logging an error.
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+            return make_unexpected(buffer.status());
+        }
+        CHECK_EXPECTED(buffer);
+
+        inputs.push_back(buffer.release());
+    }
+
+    auto output = action(std::move(inputs), std::move(optional));
+    CHECK_EXPECTED(output);
+
+    return output;
+}
+
+// A demux splits one stream into source_count streams: single sink pad,
+// source_count source pads.
+BaseDemuxElement::BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_timeout(timeout),
+    m_is_activated(false),
+    m_was_stream_aborted(false),
+    m_index_of_source(),
+    m_was_source_called(source_count, false),
+    m_buffers_for_action()
+{
+    m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
+    // reserve() is required for correctness here, not just performance:
+    // m_index_of_source is keyed by the addresses of m_sources' elements, so the
+    // vector must not reallocate while the loop below inserts those addresses.
+    m_sources.reserve(source_count);
+    for (uint32_t i = 0; i < source_count; i++) {
+        m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
+        m_index_of_source[&m_sources[i]] = i;
+    }
+}
+
+// Demux elements participate only in pull-mode pipelines; pushing is unsupported.
+hailo_status BaseDemuxElement::run_push(PipelineBuffer &&/*buffer*/)
+{
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// Called concurrently from each of the demux's source pads (one consumer thread
+// per output stream). The LAST pad to arrive pulls one buffer from upstream,
+// splits it via action(), publishes the results in m_buffers_for_action and
+// wakes the others; earlier arrivals block on m_cv until then. Each caller
+// returns the output slot matching its own pad index.
+Expected<PipelineBuffer> BaseDemuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
+{
+    CHECK_AS_EXPECTED(!optional, HAILO_INVALID_ARGUMENT, "Optional buffer is not allowed in demux element!");
+
+    // TODO: should we lock here? or only right before wait_for?
+    std::unique_lock<std::mutex> lock(m_mutex);
+    if (!m_is_activated) {
+        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+    }
+
+    m_was_source_called[m_index_of_source[&source]] = true;
+    if (were_all_sinks_called()) {
+        // We are the last pad to arrive - do the actual upstream pull and split.
+        auto input = next_pad().run_pull();
+        if (HAILO_STREAM_ABORTED_BY_USER == input.status()) {
+            LOGGER__INFO("run_pull of demux element was aborted!");
+            // Record the abort so the waiting pads (woken below) also return aborted.
+            m_was_stream_aborted = true;
+            lock.unlock();
+            m_cv.notify_all();
+            return make_unexpected(input.status());
+        }
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == input.status()) {
+            return make_unexpected(input.status());
+        }
+        CHECK_EXPECTED(input);
+
+        auto outputs = action(input.release());
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == outputs.status()) {
+            return make_unexpected(outputs.status());
+        }
+        CHECK_EXPECTED(outputs);
+
+        m_buffers_for_action = outputs.release();
+
+        // Reset the arrival flags for the next frame.
+        for (uint32_t i = 0; i < m_was_source_called.size(); i++) {
+            m_was_source_called[i] = false;
+        }
+
+        // Manual unlocking is done before notifying, to avoid waking up the waiting thread only to block again
+        lock.unlock();
+        m_cv.notify_all();
+    } else {
+        // NOTE(review): wait_for() is used without a predicate, so a spurious
+        // wakeup would be treated as "notified" and fall through to reading
+        // m_buffers_for_action - confirm this is acceptable/intended here.
+        auto cv_status = m_cv.wait_for(lock, m_timeout);
+        CHECK_AS_EXPECTED(std::cv_status::timeout != cv_status, HAILO_TIMEOUT, "Waiting for other threads in demux {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
+
+        if (m_was_stream_aborted) {
+            lock.unlock();
+            m_cv.notify_all();
+            return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
+        }
+
+        // We check if the element is not activated in case notify_all() was called from deactivate()
+        if (!m_is_activated) {
+            lock.unlock();
+            m_cv.notify_all();
+            return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+        }
+    }
+
+    assert(m_index_of_source[&source] < m_buffers_for_action.size());
+    return std::move(m_buffers_for_action[m_index_of_source[&source]]);
+}
+
+bool BaseDemuxElement::were_all_sinks_called()
+{
+    // True only once every source pad has pulled since the last dispatched frame.
+    for (const bool was_called : m_was_source_called) {
+        if (!was_called) {
+            return false;
+        }
+    }
+    return true;
+}
+
+hailo_status BaseDemuxElement::execute_activate()
+{
+    // Already active - nothing to do.
+    if (m_is_activated) {
+        return HAILO_SUCCESS;
+    }
+
+    // TODO: Should this always be true, no matter the status of source().activate()?
+    m_is_activated = true;
+    m_was_stream_aborted = false;
+    return PipelineElement::execute_activate();
+}
+
+// Deactivates the demux and wakes any source-pad threads blocked in run_pull().
+// The statement ordering here is deliberate - see the inline comments.
+hailo_status BaseDemuxElement::execute_deactivate()
+{
+    if (!m_is_activated) {
+        return HAILO_SUCCESS;
+    }
+    m_is_activated = false;
+
+    // deactivate should be called before mutex acquire and notify_all because it is possible that all queues are waiting on
+    // the run_pull of the source (HwRead) and the mutex is already acquired so this would prevent a timeout error
+    hailo_status status = PipelineElement::execute_deactivate();
+
+    {
+        // There is a case where the other thread is halted (via context switch) before the wait_for() function,
+        // then we call notify_all() here, and then the wait_for() is called - resulting in a timeout.
+        // notify_all() only works on threads which are already waiting, so that's why we acquire the lock here.
+        std::unique_lock<std::mutex> lock(m_mutex);
+    }
+    m_cv.notify_all();
+
+    // Status is checked only after the wake-up, so waiters are released even on failure.
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status BaseDemuxElement::execute_post_deactivate()
+{
+    // Reset the per-source arrival flags so the next activation starts fresh.
+    m_was_source_called.assign(m_was_source_called.size(), false);
+    return PipelineElement::execute_post_deactivate();
+}
+
+// No demux-specific abort handling; simply propagate along the pipeline.
+hailo_status BaseDemuxElement::execute_abort()
+{
+    return PipelineElement::execute_abort();
+}
+
+PipelinePad &BaseDemuxElement::next_pad()
+{
+    // Buffers are pulled, so the next element to run is the one feeding our
+    // single sink pad.
+    auto *upstream_pad = m_sinks[0].prev();
+    return *upstream_pad;
+}
+
+// Sets the timeout used when a source pad waits for its siblings in run_pull().
+hailo_status BaseDemuxElement::set_timeout(std::chrono::milliseconds timeout)
+{
+    m_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+std::vector<PipelinePad*> BaseDemuxElement::execution_pads()
+{
+    // A demux executes exactly one upstream pad: the one feeding its sink.
+    return std::vector<PipelinePad*>{&next_pad()};
+}
+
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pipeline.hpp
+ * @brief Hailo Infer Pipeline
+ **/
+
+#ifndef _HAILO_PIPELINE_HPP_
+#define _HAILO_PIPELINE_HPP_
+
+#include "hailo/buffer.hpp"
+#include "hailo/runtime_statistics.hpp"
+
+#include "utils/thread_safe_queue.hpp"
+
+#include <memory>
+#include <thread>
+#include <sstream>
+#include <functional>
+
+
+namespace hailort
+{
+
+using PipelineTimePoint = std::chrono::steady_clock::time_point;
+#define BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT (std::chrono::milliseconds(10000))
+#define DEFAULT_NUM_FRAMES_BEFORE_COLLECTION_START (100)
+
+class BufferPool;
+using BufferPoolPtr = std::shared_ptr<BufferPool>;
+
+// A buffer flowing through the pipeline. Wraps either user-owned memory (a
+// MemoryView) or pool-owned memory (a Buffer plus its BufferPool); move-only,
+// since ownership of the underlying storage must stay unique.
+class PipelineBuffer final
+{
+public:
+    // Timing metadata attached to a buffer (start-of-pipeline timestamp, used
+    // for vstream latency measurement).
+    class Metadata final
+    {
+    public:
+        explicit Metadata(PipelineTimePoint start_time);
+        // Creates an empty metadata object
+        Metadata();
+
+        ~Metadata() = default;
+        Metadata(const Metadata &) = default;
+        Metadata &operator=(const Metadata &) = delete;
+        Metadata(Metadata &&other) = default;
+        Metadata &operator=(Metadata &&other) = default;
+
+        PipelineTimePoint get_start_time() const;
+        void set_start_time(PipelineTimePoint val);
+
+    private:
+        PipelineTimePoint m_start_time;
+    };
+
+    // DATA buffers carry frames; FLUSH and DEACTIVATE are control markers
+    // passed through the pipeline instead of data.
+    enum class Type {
+        DATA = 0,
+        FLUSH,
+        DEACTIVATE
+    };
+
+    // Creates an empty PipelineBuffer (with no buffer/memory view)
+    PipelineBuffer();
+    PipelineBuffer(Type type);
+    // Wraps user-owned memory; when should_measure is true a start timestamp is recorded.
+    PipelineBuffer(MemoryView view, bool should_measure = false);
+    // Takes ownership of a pool-backed buffer.
+    PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure = false);
+    ~PipelineBuffer();
+
+    PipelineBuffer(const PipelineBuffer &) = delete;
+    PipelineBuffer &operator=(const PipelineBuffer &) = delete;
+    PipelineBuffer(PipelineBuffer &&other);
+    PipelineBuffer &operator=(PipelineBuffer &&other);
+    // True when the buffer wraps actual memory (non-empty).
+    explicit operator bool() const;
+
+    uint8_t* data();
+    size_t size() const;
+    MemoryView as_view();
+    Type get_type() const;
+    Metadata get_metadata() const;
+    void set_metadata(Metadata &&val);
+
+private:
+    Type m_type;
+    Buffer m_buffer;                // Owned storage (pool-backed buffers only)
+    bool m_should_release_buffer;   // Whether m_buffer should go back to m_pool
+    BufferPoolPtr m_pool;           // Originating pool, if any
+    MemoryView m_view;              // View over m_buffer or over user memory
+    Metadata m_metadata;
+
+    // Returns now() when should_measure is true, a default time point otherwise
+    // - NOTE(review): inferred from usage; confirm against the implementation.
+    static PipelineTimePoint add_timestamp(bool should_measure);
+};
+
+// The buffer pool has to be created as a shared pointer (via the create function) because we use shared_from_this(),
+// which is only allowed if there is already a shared pointer pointing to "this"!
+// Pool of equally-sized Buffers handed out as PipelineBuffers; free buffers are
+// kept in an SPSC queue.
+class BufferPool : public std::enable_shared_from_this<BufferPool>
+{
+public:
+    static Expected<BufferPoolPtr> create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
+        hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags);
+    BufferPool(size_t buffer_size, bool measure_vstream_latency, SpscQueue<Buffer> &&free_buffers, AccumulatorPtr &&queue_size_accumulator);
+    virtual ~BufferPool() = default;
+
+    size_t buffer_size();
+    // Blocks up to 'timeout' for a free buffer from the pool.
+    Expected<PipelineBuffer> acquire_buffer(std::chrono::milliseconds timeout);
+    AccumulatorPtr get_queue_size_accumulator();
+    // NOTE(review): presumably prefers the caller-supplied 'optional' buffer and
+    // acquires from the pool only when it is empty - confirm in the implementation.
+    Expected<PipelineBuffer> get_available_buffer(PipelineBuffer &&optional, std::chrono::milliseconds timeout);
+
+private:
+    // Returns storage to the pool; callable by PipelineBuffer (friend below).
+    hailo_status release_buffer(Buffer &&buffer);
+
+    const size_t m_buffer_size;
+    const bool m_measure_vstream_latency;
+    SpscQueue<Buffer> m_free_buffers;
+    AccumulatorPtr m_queue_size_accumulator;
+    std::mutex m_release_buffer_mutex;
+
+    friend class PipelineBuffer;
+};
+
+// Collects per-element FPS and latency statistics. start_measurement() and
+// complete_measurement() bracket the processing of one frame; results are
+// exposed via the accumulator getters.
+class DurationCollector final
+{
+public:
+    // TODO: HRT-4258
+    // Note: We start measuring the FPS/latency after num_frames_before_collection_start calls to start_measurement +
+    //       complete_measurement. This is to allow the vstream pipeline to stabilize. Thus we ignore invalid
+    //       measurements that are due to buffering that occurs when the pipeline starts.
+    static Expected<DurationCollector> create(hailo_pipeline_elem_stats_flags_t flags,
+        uint32_t num_frames_before_collection_start = DEFAULT_NUM_FRAMES_BEFORE_COLLECTION_START);
+    DurationCollector(const DurationCollector &) = delete;
+    DurationCollector(DurationCollector &&other) = default;
+    DurationCollector &operator=(const DurationCollector &) = delete;
+    DurationCollector &operator=(DurationCollector &&other) = delete;
+    ~DurationCollector() = default;
+
+    // Call at the start / end of processing a single frame.
+    void start_measurement();
+    void complete_measurement();
+
+    // latency_accumulator will measure latency in seconds
+    AccumulatorPtr get_latency_accumulator();
+    // average_fps_accumulator will measure fps in seconds^-1
+    AccumulatorPtr get_average_fps_accumulator();
+
+private:
+    DurationCollector(bool measure_latency, bool measure_average_fps,
+        AccumulatorPtr &&latency_accumulator, AccumulatorPtr &&average_fps_accumulator,
+        uint32_t num_frames_before_collection_start);
+    static bool should_measure_latency(hailo_pipeline_elem_stats_flags_t flags);
+    static bool should_measure_average_fps(hailo_pipeline_elem_stats_flags_t flags);
+
+    const bool m_measure_latency;
+    const bool m_measure_average_fps;
+    const bool m_measure;       // NOTE(review): presumably latency || fps - confirm in ctor
+    AccumulatorPtr m_latency_accumulator;
+    AccumulatorPtr m_average_fps_accumulator;
+    PipelineTimePoint m_start;  // Timestamp taken by start_measurement()
+    size_t m_count;             // Measurements so far (for the warm-up skip)
+    const size_t m_num_frames_before_collection_start;
+};
+
+// Base class for any named pipeline entity (elements and pads).
+class PipelineObject
+{
+public:
+    PipelineObject(const std::string &name);
+    virtual ~PipelineObject() = default;
+    PipelineObject(PipelineObject &&) noexcept = default;
+    PipelineObject& operator=(PipelineObject &&) noexcept = default;
+
+    const std::string &name() const;
+
+    // Builds a standard element name from the element name, stream name and stream index.
+    static std::string create_element_name(const std::string &element_name, const std::string &stream_name, uint8_t stream_index);
+
+private:
+    std::string m_name;
+};
+
+class PipelineElement;
+using PushCompleteCallback = std::function<void(const PipelineBuffer::Metadata&)>;
+using PullCompleteCallback = std::function<void(const PipelineBuffer::Metadata&)>;
+
+// Connection point of a PipelineElement. Pads are linked source->sink to form
+// the pipeline graph; run_push/run_pull and the lifecycle calls travel along
+// these links.
+class PipelinePad final : public PipelineObject
+{
+public:
+    enum class Type
+    {
+        SOURCE,
+        SINK
+    };
+
+    // Link left's source pad (left->sources()[left_source_index]) with right's sink pad (right->sinks()[right_sink_index])
+    static hailo_status link_pads(std::shared_ptr<PipelineElement> left, std::shared_ptr<PipelineElement> right,
+        uint32_t left_source_index = 0, uint32_t right_sink_index = 0);
+    // Link left's source pad (left.sources()[left_source_index]) with right's sink pad (right.sinks()[right_sink_index])
+    static hailo_status link_pads(PipelineElement &left, PipelineElement &right, uint32_t left_source_index = 0,
+        uint32_t right_sink_index = 0);
+    static std::string create_pad_name(const std::string &element_name, Type pad_type);
+
+    PipelinePad(PipelineElement &element, const std::string &element_name, Type pad_type);
+    PipelinePad(const PipelinePad &) = delete;
+    PipelinePad(PipelinePad &&other) = default;
+    PipelinePad &operator=(const PipelinePad &) = delete;
+    PipelinePad &operator=(PipelinePad &&other) = delete;
+    ~PipelinePad() = default;
+
+    // Lifecycle operations, forwarded through the pipeline.
+    hailo_status activate();
+    hailo_status deactivate();
+    hailo_status post_deactivate();
+    hailo_status clear();
+    hailo_status flush();
+    hailo_status abort();
+    hailo_status wait_for_finish();
+    hailo_status resume();
+    // Data-path operations; 'optional' lets the caller supply an output buffer.
+    virtual hailo_status run_push(PipelineBuffer &&buffer);
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional = PipelineBuffer());
+    void set_push_complete_callback(PushCompleteCallback push_complete_callback);
+    void set_pull_complete_callback(PullCompleteCallback pull_complete_callback);
+    // Graph wiring accessors (next = downstream, prev = upstream).
+    void set_next(PipelinePad *next);
+    void set_prev(PipelinePad *prev);
+    PipelinePad *next();
+    PipelinePad *prev();
+    PipelineElement &element();
+    const PipelinePad *next() const;
+    const PipelinePad *prev() const;
+    const PipelineElement &element() const;
+
+protected:
+    PipelineElement &m_element;     // Owning element (reference set in ctor, never reseated)
+    PipelinePad *m_next;            // Downstream pad, nullptr at a pipeline boundary
+    PipelinePad *m_prev;            // Upstream pad, nullptr at a pipeline boundary
+    PushCompleteCallback m_push_complete_callback;
+    PullCompleteCallback m_pull_complete_callback;
+
+private:
+    // Automatic naming isn't thread safe
+    static uint32_t index;
+};
+
+// Note: PipelinePads accept 'PipelineElement &' in their ctor. PipelineElements can pass "*this" to their
+// PipelinePads (sources/sinks) in the PipelineElement ctor. This is OK because the ctor of PipelinePad
+// does nothing with the element reference other than setting it as class member.
+// Base class of all pipeline stages. Owns its sink/source pads, the shared
+// pipeline-wide status and the per-element statistics collector.
+class PipelineElement : public PipelineObject
+{
+public:
+    PipelineElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    virtual ~PipelineElement() = default;
+
+    PipelineElement(PipelineElement &&other) = delete;
+    PipelineElement(const PipelineElement &) = delete;
+    PipelineElement &operator=(const PipelineElement &) = delete;
+    PipelineElement &operator=(PipelineElement &&other) = delete;
+
+    // Lifecycle entry points. NOTE(review): implementations are not in this
+    // header - presumably each dispatches to the matching execute_*() below
+    // and propagates via execution_pads(); confirm in the implementation.
+    hailo_status activate();
+    hailo_status deactivate();
+    hailo_status post_deactivate();
+    hailo_status clear();
+    hailo_status flush();
+    hailo_status abort();
+    hailo_status resume();
+    hailo_status wait_for_finish();
+    virtual hailo_status run_push(PipelineBuffer &&buffer) = 0;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) = 0;
+    AccumulatorPtr get_fps_accumulator();
+    AccumulatorPtr get_latency_accumulator();
+    virtual std::vector<AccumulatorPtr> get_queue_size_accumulators();
+    std::vector<PipelinePad> &sinks();
+    std::vector<PipelinePad> &sources();
+    const std::vector<PipelinePad> &sinks() const;
+    const std::vector<PipelinePad> &sources() const;
+    virtual std::string description() const;
+
+    virtual void set_on_cant_pull_callback(std::function<void()> callback)
+    {
+        m_cant_pull_callback = callback;
+    }
+
+    virtual void set_on_can_pull_callback(std::function<void()> callback)
+    {
+        m_can_pull_callback = callback;
+    }
+
+protected:
+    DurationCollector m_duration_collector;
+    std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;  // Shared pipeline-wide failure status
+    std::vector<PipelinePad> m_sinks;
+    std::vector<PipelinePad> m_sources;
+
+    std::function<void()> m_cant_pull_callback;
+    std::function<void()> m_can_pull_callback;
+
+    // Pads to which lifecycle operations should be propagated (direction-dependent).
+    virtual std::vector<PipelinePad*> execution_pads() = 0;
+    virtual hailo_status execute_activate();
+    virtual hailo_status execute_deactivate();
+    virtual hailo_status execute_post_deactivate();
+    virtual hailo_status execute_clear();
+    virtual hailo_status execute_flush();
+    virtual hailo_status execute_abort();
+    virtual hailo_status execute_resume();
+    virtual hailo_status execute_wait_for_finish();
+
+    // Applies the given operation to every execution pad.
+    virtual hailo_status execute(std::function<hailo_status(PipelinePad*)>);
+};
+
+// An element with one source pad only (generates data)
+class SourceElement : public PipelineElement
+{
+public:
+    SourceElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    // The element's single (source) pad.
+    PipelinePad &source();
+
+protected:
+    virtual std::vector<PipelinePad*> execution_pads() override;
+};
+
+// An element with one sink pad only (consumes data)
+class SinkElement : public PipelineElement
+{
+public:
+    SinkElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    // The element's single (sink) pad.
+    PipelinePad &sink();
+
+protected:
+    virtual std::vector<PipelinePad*> execution_pads() override;
+};
+
+// Transfers data from one pad to another pad. Has one sink pad and one source pad.
+class IntermediateElement : public PipelineElement
+{
+public:
+    IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    // The pad whose element should run next (direction depends on push/pull mode).
+    virtual PipelinePad &next_pad() = 0;
+
+protected:
+    virtual std::vector<PipelinePad*> execution_pads() override;
+};
+
+// Intermediate element that transforms each buffer 1:1 via action().
+class FilterElement : public IntermediateElement
+{
+public:
+    FilterElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    virtual ~FilterElement() = default;
+
+    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+
+protected:
+    // The optional buffer functions as an output buffer that the user can write to instead of acquiring a new buffer
+    virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) = 0;
+};
+
+// Queue element base: decouples producer from consumer with an SPSC queue plus
+// a worker thread that repeatedly runs the subclass's run_in_thread().
+class BaseQueueElement : public IntermediateElement
+{
+public:
+    virtual ~BaseQueueElement();
+
+    hailo_status set_timeout(std::chrono::milliseconds timeout);
+    virtual std::string description() const override;
+
+    // NOTE(review): "INIFINITE" is a typo of "INFINITE"; kept as-is because
+    // renaming would break existing callers.
+    static constexpr auto INIFINITE_TIMEOUT() { return std::chrono::milliseconds(HAILO_INFINITE); }
+
+protected:
+    static Expected<SpscQueue<PipelineBuffer>> create_queue(size_t queue_size, EventPtr shutdown_event);
+    BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+        AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        Event &&activation_event, Event &&deactivation_event);
+
+    // Reports the shared pipeline status (failure set by the worker thread).
+    hailo_status pipeline_status();
+
+    virtual hailo_status execute_activate() override;
+    virtual hailo_status execute_post_deactivate() override;
+    virtual hailo_status execute_clear() override;
+    virtual hailo_status execute_resume() override;
+    virtual hailo_status execute_wait_for_finish() override;
+
+    /// Starts/stops the queue thread. This functions needs to be called on subclasses ctor and dtor
+    /// accordingly because otherwise, if we will start/stop thread in this class we will face pure-call
+    /// to `run_in_thread`.
+    /// This functions don't return status because they are meant to be called on ctor and dtor
+    void start_thread();
+    void stop_thread();
+
+    virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+
+    // One iteration of the worker thread's loop; implemented by subclasses.
+    virtual hailo_status run_in_thread() = 0;
+    virtual std::string thread_name() = 0;
+
+    SpscQueue<PipelineBuffer> m_queue;
+    EventPtr m_shutdown_event;          // Signaled on teardown to unblock queue waits
+    std::chrono::milliseconds m_timeout;
+    std::thread m_thread;               // Worker running run_in_thread() in a loop
+    std::atomic_bool m_is_thread_running;
+    Event m_activation_event;
+    Event m_deactivation_event;
+    AccumulatorPtr m_queue_size_accumulator;
+    std::atomic_bool m_is_run_in_thread_running;
+    std::condition_variable m_cv;
+    std::mutex m_mutex;
+};
+
+// Queue element for push-mode pipelines: callers push buffers into the queue
+// and the worker thread forwards them downstream.
+class PushQueueElement : public BaseQueueElement
+{
+public:
+    static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
+        size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+    virtual ~PushQueueElement();
+
+    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    // Pulling is not supported on a push queue.
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual PipelinePad &next_pad() override;
+
+protected:
+    virtual hailo_status execute_deactivate() override;
+    virtual hailo_status run_in_thread() override;
+    virtual std::string thread_name() override { return "PUSH_QUEUE"; };
+    virtual hailo_status execute_abort() override;
+};
+
+// Queue element for pull-mode pipelines: the worker thread pulls buffers from
+// upstream into the queue; callers dequeue them via run_pull().
+class PullQueueElement : public BaseQueueElement
+{
+public:
+    static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
+        size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+    virtual ~PullQueueElement();
+
+    // Pushing is not supported on a pull queue.
+    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual PipelinePad &next_pad() override;
+
+    // The "can/can't pull" notifications are driven by the queue's own
+    // enqueue-capacity callbacks: a full queue means nothing more can be pulled.
+    virtual void set_on_cant_pull_callback(std::function<void()> callback) override
+    {
+        m_cant_pull_callback = callback;
+        m_queue.set_on_cant_enqueue_callback([this] () {
+            m_cant_pull_callback();
+        });
+    }
+
+    virtual void set_on_can_pull_callback(std::function<void()> callback) override
+    {
+        m_can_pull_callback = callback;
+        m_queue.set_on_can_enqueue_callback([this] () {
+            m_can_pull_callback();
+        });
+    }
+
+protected:
+    virtual hailo_status execute_deactivate() override;
+    virtual hailo_status run_in_thread() override;
+    virtual std::string thread_name() override { return "PULL_QUEUE"; };
+};
+
+// Pull queue variant that fills user-provided buffers in place: the caller's
+// buffer travels through the inherited m_queue to the worker thread, is filled
+// by the upstream pipeline, and comes back via m_full_buffer_queue.
+class UserBufferQueueElement : public PullQueueElement
+{
+public:
+    static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
+        hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue, EventPtr shutdown_event,
+        const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+
+    // 'optional' (the user buffer) is mandatory here, unlike in the base class.
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+
+    // Unlike PullQueueElement, the callbacks are not tied to queue capacity
+    // (progress is gated by the user supplying a buffer).
+    virtual void set_on_cant_pull_callback(std::function<void()> callback) override
+    {
+        m_cant_pull_callback = callback;
+    }
+
+    virtual void set_on_can_pull_callback(std::function<void()> callback) override
+    {
+        m_can_pull_callback = callback;
+    }
+
+protected:
+    virtual hailo_status execute_clear() override;
+    virtual hailo_status run_in_thread() override;
+
+private:
+    // Buffers already filled by the pipeline, awaiting pickup in run_pull().
+    SpscQueue<PipelineBuffer> m_full_buffer_queue;
+};
+
+// Merges several input streams into one: N sink pads, one source pad. The
+// subclass's action() combines one buffer per sink into a single output.
+class BaseMuxElement : public PipelineElement
+{
+public:
+    BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    virtual ~BaseMuxElement() = default;
+
+    // Pull-mode only; run_push returns HAILO_NOT_IMPLEMENTED.
+    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+
+protected:
+    // Combines one input per sink; 'optional' is a pre-allocated output buffer.
+    virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) = 0;
+    virtual std::vector<PipelinePad*> execution_pads() override;
+
+    std::chrono::milliseconds m_timeout;
+};
+
+// Base class for elements that split one input stream into several outputs
+// (single sink -> source_count sources). Subclasses supply the split in action().
+class BaseDemuxElement : public PipelineElement
+{
+public:
+    BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+    virtual ~BaseDemuxElement() = default;
+
+    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    hailo_status set_timeout(std::chrono::milliseconds timeout);
+
+protected:
+    virtual hailo_status execute_activate() override;
+    virtual hailo_status execute_deactivate() override;
+    virtual hailo_status execute_post_deactivate() override;
+    virtual hailo_status execute_abort() override;
+    // Splits the single input buffer into one buffer per source.
+    virtual Expected<std::vector<PipelineBuffer>> action(PipelineBuffer &&input) = 0;
+    virtual std::vector<PipelinePad*> execution_pads() override;
+
+    // Maximum time to wait before a pull/push fails with timeout.
+    std::chrono::milliseconds m_timeout;
+
+private:
+    bool were_all_sinks_called();
+    PipelinePad &next_pad();
+
+    std::atomic_bool m_is_activated;
+    std::atomic_bool m_was_stream_aborted;
+    // Maps each source pad to its index in the per-source bookkeeping vectors below.
+    std::unordered_map<const PipelinePad*, uint32_t> m_index_of_source;
+    std::vector<bool> m_was_source_called;
+    std::vector<PipelineBuffer> m_buffers_for_action;
+    // Guard + condvar coordinating the multiple source pads pulling concurrently.
+    std::mutex m_mutex;
+    std::condition_variable m_cv;
+};
+
+// Kinds of statistics accumulators a pipeline element can expose.
+enum class AccumulatorType
+{
+    FPS,
+    LATENCY,
+    QUEUE_SIZE
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_PIPELINE_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vstream.cpp
+ * @brief Implementation of the virtual stream
+ **/
+
+#include "hailo/vstream.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/runtime_statistics_internal.hpp"
+
+#include "net_flow/pipeline/vstream_internal.hpp"
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "rpc/rpc_definitions.hpp"
+#include "service/rpc_client_utils.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+#include <unordered_set>
+
+
+namespace hailort
+{
+
+static std::map<std::string, AccumulatorPtr> get_pipeline_accumulators_by_type(
+ const std::vector<std::shared_ptr<PipelineElement>> &pipeline, AccumulatorType accumulator_type);
+
+static std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_size_accumulators(
+ const std::vector<std::shared_ptr<PipelineElement>> &pipeline);
+
+/// Builds a PreInferElement: an input (H2D) transform context, a pool of
+/// transformed-frame buffers and a duration collector, then wires them into
+/// the element. Returns HAILO_OUT_OF_HOST_MEMORY if allocation fails.
+Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
+    const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // Host-side format conversion performed before data is sent to the device.
+    auto transform_context = InputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
+        dst_quant_info);
+    CHECK_EXPECTED(transform_context, "Failed Creating InputTransformContext");
+
+    // Timing statistics for this element (enabled per elem_flags).
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    // Pre-allocated destination buffers sized for the transformed frame.
+    auto buffer_pool = BufferPool::create(transform_context.value()->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags,
+        vstream_flags);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
+
+    auto elem = make_shared_nothrow<PreInferElement>(transform_context.release(),
+        buffer_pool.release(), name, timeout, duration_collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != elem, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", elem->name());
+
+    return elem;
+}
+
+// Convenience overload: pulls timeout, queue size and stats flags out of the
+// user-facing vstream params and forwards to the explicit overload above.
+Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
+    const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    return PreInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, name,
+        std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags,
+        vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
+}
+
+// Takes ownership of the transform context and shares the buffer pool.
+PreInferElement::PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
+    const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_transform_context(std::move(transform_context)),
+    m_pool(buffer_pool),
+    m_timeout(timeout)
+{}
+
+// PreInferElement sits in a push pipeline; pulling from it is a caller error.
+Expected<PipelineBuffer> PreInferElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
+{
+    LOGGER__ERROR("PreInferElement does not support run_pull operation");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+// Exposes the buffer pool's queue-size accumulator (when stats are enabled);
+// otherwise returns an empty vector.
+std::vector<AccumulatorPtr> PreInferElement::get_queue_size_accumulators()
+{
+    if (nullptr != m_pool->get_queue_size_accumulator()) {
+        return {m_pool->get_queue_size_accumulator()};
+    }
+    return std::vector<AccumulatorPtr>();
+}
+
+// Execution continues at the pad connected after our single source pad.
+PipelinePad &PreInferElement::next_pad()
+{
+    // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
+    return *m_sources[0].next();
+}
+
+// Human-readable description: "(<element name> | <transform description>)".
+std::string PreInferElement::description() const
+{
+    std::stringstream description_stream;
+    description_stream << "(" << name() << " | " << m_transform_context->description() << ")";
+    return description_stream.str();
+}
+
+// Transforms one user frame (H2D direction) into a buffer acquired from the pool.
+// FLUSH markers pass through untouched. Propagates shutdown/timeout from the pool.
+Expected<PipelineBuffer> PreInferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    // Flush markers carry no payload - forward them as-is.
+    if (PipelineBuffer::Type::FLUSH == input.get_type()) {
+        return std::move(input);
+    }
+
+    // Reuse 'optional' if provided, otherwise wait (up to m_timeout) for a pool buffer.
+    auto transformed_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == transformed_buffer.status()) {
+        return make_unexpected(transformed_buffer.status());
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != transformed_buffer.status(), HAILO_TIMEOUT,
+        "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
+    CHECK_EXPECTED(transformed_buffer);
+
+    auto dst = transformed_buffer->as_view();
+    // Only the transform itself is attributed to this element's duration stats.
+    m_duration_collector.start_measurement();
+    const auto status = m_transform_context->transform(input.as_view(), dst);
+    m_duration_collector.complete_measurement();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Note: The latency to be measured starts as the input buffer is sent to the InputVStream (via write())
+    transformed_buffer->set_metadata(input.get_metadata());
+
+    return transformed_buffer.release();
+}
+
+/// Builds a PostInferElement: an output (D2H) transform context plus a duration
+/// collector. Returns HAILO_OUT_OF_HOST_MEMORY if the element allocation fails.
+Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape,
+    const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+    const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
+    hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // Host-side conversion of device output into the user-requested format.
+    auto transform_context = OutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
+        dst_quant_info, nms_info);
+    CHECK_EXPECTED(transform_context, "Failed Creating OutputTransformContext");
+
+    // Timing statistics for this element (enabled per elem_flags).
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto elem = make_shared_nothrow<PostInferElement>(transform_context.release(),
+        name, duration_collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != elem, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", elem->name());
+
+    return elem;
+}
+
+// Convenience overload: extracts the element-stats flags from the vstream params.
+Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
+    const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    return PostInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, nms_info,
+        name, vstream_params.pipeline_elements_stats_flags, pipeline_status);
+}
+
+// Takes ownership of the output transform context.
+PostInferElement::PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
+    DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_transform_context(std::move(transform_context))
+{}
+
+// PostInferElement sits in a pull pipeline; pushing into it is a caller error.
+hailo_status PostInferElement::run_push(PipelineBuffer &&/*buffer*/)
+{
+    LOGGER__ERROR("PostInferElement does not support run_push operation");
+    return HAILO_INVALID_OPERATION;
+}
+
+// Execution continues at the pad connected before our single sink pad.
+PipelinePad &PostInferElement::next_pad()
+{
+    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
+    return *m_sinks[0].prev();
+}
+
+// Human-readable description: "(<element name> | <transform description>)".
+std::string PostInferElement::description() const
+{
+    std::stringstream description_stream;
+    description_stream << "(" << name() << " | " << m_transform_context->description() << ")";
+    return description_stream.str();
+}
+
+// Transforms one device frame (D2H direction) into the caller-supplied buffer.
+// 'optional' must hold the user's destination buffer (provided by read()).
+Expected<PipelineBuffer> PostInferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be valid in {}!", name());
+
+    // Note: The latency to be measured starts as the buffer is read from the HW (it's 'input' in this case)
+    optional.set_metadata(input.get_metadata());
+
+    auto dst = optional.as_view();
+    // Only the transform itself is attributed to this element's duration stats.
+    m_duration_collector.start_measurement();
+    const auto status = m_transform_context->transform(input.as_view(), dst);
+    m_duration_collector.complete_measurement();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::move(optional);
+}
+
+// Fuses several defused NMS infos into a single info: the shared fields are
+// taken from the first entry and the class counts are summed across all entries.
+// Precondition: nms_infos must be non-empty (otherwise nms_infos[0] is UB).
+static hailo_nms_info_t fuse_nms_info(const std::vector<hailo_nms_info_t> &nms_infos)
+{
+    assert(!nms_infos.empty());
+
+    hailo_nms_info_t fused_info = nms_infos[0];
+    fused_info.is_defused = false;
+    fused_info.number_of_classes = 0;
+    for (const auto &nms_info : nms_infos) {
+        fused_info.number_of_classes += nms_info.number_of_classes;
+    }
+
+    return fused_info;
+}
+
+/// Builds an NmsPostProcessMuxElement around a net-flow NMS op: a buffer pool
+/// sized for the host-side NMS frame plus a duration collector.
+Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(std::shared_ptr<net_flow::Op> nms_op,
+    hailo_nms_info_t nms_info, const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+    hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // The op is expected to produce exactly one output, whose format sizes the pool.
+    assert(nms_op->outputs_metadata().size() == 1);
+
+    // Timing statistics for this element (enabled per elem_flags).
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_host_frame_size(nms_info, nms_op->outputs_metadata().begin()->second.format),
+        buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
+
+    auto mux_elem = make_shared_nothrow<NmsPostProcessMuxElement>(nms_op, buffer_pool.release(),
+        name, timeout, duration_collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != mux_elem, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", mux_elem->name());
+    return mux_elem;
+}
+
+// Convenience overload: pulls timeout, queue size and stats flags from the vstream params.
+Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(std::shared_ptr<net_flow::Op> nms_op,
+    hailo_nms_info_t nms_info, const std::string &name, const hailo_vstream_params_t &vstream_params,
+    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    return NmsPostProcessMuxElement::create(nms_op, nms_info, name, std::chrono::milliseconds(vstream_params.timeout_ms),
+        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event,
+        pipeline_status);
+}
+
+// Sink count equals the number of inputs the NMS op expects.
+NmsPostProcessMuxElement::NmsPostProcessMuxElement(std::shared_ptr<net_flow::Op> nms_op, BufferPoolPtr &&pool,
+    const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    BaseMuxElement(nms_op->inputs_metadata().size(), name, timeout, std::move(duration_collector), std::move(pipeline_status)),
+    m_nms_op(nms_op),
+    m_pool(std::move(pool))
+{}
+
+// Exposes the buffer pool's queue-size accumulator (when stats are enabled);
+// otherwise returns an empty vector.
+std::vector<AccumulatorPtr> NmsPostProcessMuxElement::get_queue_size_accumulators()
+{
+    if (nullptr != m_pool->get_queue_size_accumulator()) {
+        return {m_pool->get_queue_size_accumulator()};
+    }
+    return std::vector<AccumulatorPtr>();
+}
+
+// Runs the net-flow NMS op over one buffer per sink and writes the result into
+// a pool buffer. Propagates shutdown from the pool; a pool timeout now gets the
+// same descriptive diagnostic the other mux/filter actions emit (see
+// NmsMuxElement::action) instead of a bare error status.
+Expected<PipelineBuffer> NmsPostProcessMuxElement::action(std::vector<PipelineBuffer> &&input_buffers, PipelineBuffer &&optional)
+{
+    // Keyed views over the inputs, matching the op's expected input names.
+    std::map<std::string, MemoryView> inputs;
+    std::map<std::string, MemoryView> outputs;
+    for (size_t i = 0; i < input_buffers.size(); ++i) {
+        inputs.insert({m_sinks_names[i], input_buffers[i].as_view()});
+    }
+    auto acquired_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
+        return make_unexpected(acquired_buffer.status());
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != acquired_buffer.status(), HAILO_TIMEOUT,
+        "{} failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
+    CHECK_EXPECTED(acquired_buffer);
+    outputs.insert({"", acquired_buffer.value().as_view()}); // TODO: fill with correct name
+    m_duration_collector.start_measurement();
+
+    auto post_process_result = m_nms_op->execute(inputs, outputs);
+    m_duration_collector.complete_measurement();
+    CHECK_SUCCESS_AS_EXPECTED(post_process_result);
+    // release() moves the buffer out of the Expected, consistent with the sibling actions.
+    return acquired_buffer.release();
+}
+
+/// Builds an NmsMuxElement that fuses several defused NMS streams: the pool is
+/// sized for the fused hardware NMS frame computed from all input infos.
+Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos,
+    const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+    hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    const auto &fused_info = fuse_nms_info(nms_infos);
+
+    // Timing statistics for this element (enabled per elem_flags).
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_hw_frame_size(fused_info),
+        buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
+
+    auto mux_elem = make_shared_nothrow<NmsMuxElement>(nms_infos, fused_info, buffer_pool.release(),
+        name, timeout, duration_collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != mux_elem, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", mux_elem->name());
+
+    return mux_elem;
+}
+
+// Convenience overload: pulls timeout, queue size and stats flags from the vstream params.
+Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
+    const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    return NmsMuxElement::create(nms_infos, name, std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size,
+        vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
+}
+
+// One sink per defused NMS stream; keeps both the per-stream and fused infos.
+NmsMuxElement::NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool,
+    const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    BaseMuxElement(nms_infos.size(), name, timeout, std::move(duration_collector), std::move(pipeline_status)),
+    m_nms_infos(nms_infos),
+    m_fused_nms_info(fused_nms_info),
+    m_pool(std::move(pool))
+{}
+
+// Returns the fused NMS info describing this element's combined output.
+const hailo_nms_info_t &NmsMuxElement::get_fused_nms_info() const
+{
+    return m_fused_nms_info;
+}
+
+// Exposes the buffer pool's queue-size accumulator (when stats are enabled);
+// otherwise returns an empty vector.
+std::vector<AccumulatorPtr> NmsMuxElement::get_queue_size_accumulators()
+{
+    if (nullptr != m_pool->get_queue_size_accumulator()) {
+        return {m_pool->get_queue_size_accumulator()};
+    }
+    return std::vector<AccumulatorPtr>();
+}
+
+// Fuses one defused NMS buffer per sink into a single fused buffer acquired
+// from the pool. Propagates shutdown/timeout from the pool.
+Expected<PipelineBuffer> NmsMuxElement::action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional)
+{
+    std::vector<MemoryView> input_views;
+
+    input_views.reserve(inputs.size());
+    for (auto &input_buf : inputs) {
+        input_views.push_back(input_buf.as_view());
+    }
+
+    // Reuse 'optional' if provided, otherwise wait (up to m_timeout) for a pool buffer.
+    auto acquired_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
+        return make_unexpected(acquired_buffer.status());
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != acquired_buffer.status(), HAILO_TIMEOUT,
+        "{} failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
+    CHECK_EXPECTED(acquired_buffer);
+
+    // Only the fuse operation is attributed to this element's duration stats.
+    m_duration_collector.start_measurement();
+    const auto status = fuse_buffers(input_views, m_nms_infos, acquired_buffer.value().as_view());
+    m_duration_collector.complete_measurement();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return acquired_buffer.release();
+}
+
+// Builds a TransformDemuxElement: one buffer pool per demuxed edge (sized for
+// that edge's HW frame) plus a duration collector. Now also logs creation like
+// every other element factory in this file (PreInfer/PostInfer/NmsMux/...).
+Expected<std::shared_ptr<TransformDemuxElement>> TransformDemuxElement::create(std::shared_ptr<OutputDemuxer> demuxer,
+    const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    std::vector<BufferPoolPtr> pools;
+    pools.reserve(demuxer->get_edges_stream_info().size());
+
+    // One pool per demuxed output edge, sized for that edge's hardware frame.
+    for (const auto& mux_edge : demuxer->get_edges_stream_info()) {
+        auto buffer_pool = BufferPool::create(mux_edge.hw_frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+        CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
+        pools.push_back(buffer_pool.release());
+    }
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto demux_elem_ptr = make_shared_nothrow<TransformDemuxElement>(demuxer, std::move(pools), name, timeout,
+        duration_collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != demux_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", demux_elem_ptr->name());
+
+    return demux_elem_ptr;
+}
+
+// Source count equals the number of demuxed edges; shares the demuxer, owns the pools.
+TransformDemuxElement::TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools,
+    const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+    BaseDemuxElement(demuxer->get_edges_stream_info().size(), name, timeout, std::move(duration_collector),
+        std::move(pipeline_status)),
+    m_demuxer(demuxer),
+    m_pools(std::move(pools))
+{}
+
+// Collects the queue-size accumulators of all per-edge pools that have one.
+std::vector<AccumulatorPtr> TransformDemuxElement::get_queue_size_accumulators()
+{
+    std::vector<AccumulatorPtr> accumulators;
+    for (const auto &pool : m_pools) {
+        if (pool->get_queue_size_accumulator() != nullptr) {
+            accumulators.push_back(pool->get_queue_size_accumulator());
+        }
+    }
+    return accumulators;
+}
+
+// Splits one muxed HW buffer into per-edge buffers acquired from the matching
+// pools, then runs the demuxer transform into them. Propagates shutdown/timeout.
+Expected<std::vector<PipelineBuffer>> TransformDemuxElement::action(PipelineBuffer &&input)
+{
+    std::vector<PipelineBuffer> outputs;
+    std::vector<MemoryView> raw_buffers;
+
+    auto mux_edges = m_demuxer->get_edges_stream_info();
+    outputs.reserve(mux_edges.size());
+    raw_buffers.reserve(mux_edges.size());
+
+    // Acquire one destination buffer per edge; pools are index-aligned with the edges.
+    for (uint32_t i = 0; i < mux_edges.size(); i++) {
+        auto acquired_buffer = m_pools[i]->acquire_buffer(m_timeout);
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
+            return make_unexpected(acquired_buffer.status());
+        }
+        CHECK_EXPECTED(acquired_buffer, "Failed to acquire buffer");
+        outputs.emplace_back(acquired_buffer.release());
+
+        raw_buffers.push_back(outputs.back().as_view());
+    }
+
+    // Only the demux transform is attributed to this element's duration stats.
+    m_duration_collector.start_measurement();
+    const auto status = m_demuxer->transform_demux(input.as_view(), raw_buffers);
+    m_duration_collector.complete_measurement();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return outputs;
+}
+
+// Wires a vstream around its pipeline: stores the entry element, collects the
+// per-element statistic accumulators, and starts the vstream. Construction
+// success is reported through 'output_status' (constructors can't return one).
+BaseVStream::BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&core_op_activated_event,
+    hailo_status &output_status) :
+    m_vstream_info(vstream_info),
+    m_vstream_params(vstream_params),
+    m_measure_pipeline_latency((vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0),
+    m_entry_element(pipeline_entry),
+    m_pipeline(std::move(pipeline)),
+    m_is_activated(false),
+    m_is_aborted(false),
+    m_pipeline_status(std::move(pipeline_status)),
+    m_shutdown_event(shutdown_event),
+    m_core_op_activated_event(std::move(core_op_activated_event)),
+    m_fps_accumulators(get_pipeline_accumulators_by_type(m_pipeline, AccumulatorType::FPS)),
+    m_latency_accumulators(get_pipeline_accumulators_by_type(m_pipeline, AccumulatorType::LATENCY)),
+    m_queue_size_accumulators(get_pipeline_queue_size_accumulators(m_pipeline)),
+    m_pipeline_latency_accumulator(pipeline_latency_accumulator)
+{
+    output_status = start_vstream();
+}
+
+// Move constructor: transfers the pipeline and clears the source's
+// activated/aborted flags so only one object ever deactivates the pipeline.
+BaseVStream::BaseVStream(BaseVStream &&other) noexcept :
+    m_vstream_info(std::move(other.m_vstream_info)),
+    m_vstream_params(std::move(other.m_vstream_params)),
+    m_measure_pipeline_latency(std::move(other.m_measure_pipeline_latency)),
+    m_entry_element(std::move(other.m_entry_element)),
+    m_pipeline(std::move(other.m_pipeline)),
+    m_is_activated(std::exchange(other.m_is_activated, false)),
+    m_is_aborted(std::exchange(other.m_is_aborted, false)),
+    m_pipeline_status(std::move(other.m_pipeline_status)),
+    m_shutdown_event(std::move(other.m_shutdown_event)),
+    m_core_op_activated_event(std::move(other.m_core_op_activated_event)),
+    m_fps_accumulators(std::move(other.m_fps_accumulators)),
+    m_latency_accumulators(std::move(other.m_latency_accumulators)),
+    m_queue_size_accumulators(std::move(other.m_queue_size_accumulators)),
+    m_pipeline_latency_accumulator(std::move(other.m_pipeline_latency_accumulator))
+{}
+
+// Move assignment: only legal before activation (asserted), since an active
+// target would otherwise need to be deactivated first.
+BaseVStream& BaseVStream::operator=(BaseVStream &&other) noexcept
+{
+    if (this != &other) {
+        // operator= is used only for vstream creation BEFORE activation. otherwise we should deactivate vstream here
+        assert(!m_is_activated);
+        m_vstream_info = std::move(other.m_vstream_info);
+        m_vstream_params = std::move(other.m_vstream_params);
+        m_measure_pipeline_latency = std::move(other.m_measure_pipeline_latency);
+        m_entry_element = std::move(other.m_entry_element);
+        m_pipeline = std::move(other.m_pipeline);
+        m_is_activated = std::exchange(other.m_is_activated, false);
+        m_is_aborted = std::exchange(other.m_is_aborted, false);
+        m_pipeline_status = std::move(other.m_pipeline_status);
+        m_shutdown_event = std::move(other.m_shutdown_event);
+        m_core_op_activated_event = std::move(other.m_core_op_activated_event);
+        m_fps_accumulators = std::move(other.m_fps_accumulators);
+        m_latency_accumulators = std::move(other.m_latency_accumulators);
+        m_queue_size_accumulators = std::move(other.m_queue_size_accumulators);
+        m_pipeline_latency_accumulator = std::move(other.m_pipeline_latency_accumulator);
+    }
+    return *this;
+}
+
+// Resets the shutdown event, activates the pipeline starting from the entry
+// element, and resumes the underlying stream. HAILO_STREAM_NOT_ACTIVATED from
+// resume() is tolerated - the stream may simply not be activated yet.
+hailo_status BaseVStream::start_vstream()
+{
+    auto status = m_shutdown_event->reset();
+    CHECK_SUCCESS(status);
+
+    LOGGER__DEBUG("Activating {}...", name());
+    status = m_entry_element->activate();
+    CHECK_SUCCESS(status);
+
+    status = resume();
+    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+        "Failed to resume stream in {}", name());
+
+    m_is_activated = true;
+    return HAILO_SUCCESS;
+}
+
+// Marks the vstream aborted and propagates the abort through the pipeline.
+hailo_status BaseVStream::abort()
+{
+    m_is_aborted = true;
+    return m_entry_element->abort();
+}
+
+// Clears the aborted flag and propagates the resume through the pipeline.
+hailo_status BaseVStream::resume()
+{
+    m_is_aborted = false;
+    return m_entry_element->resume();
+}
+
+// Deactivates the pipeline (if activated). Failures are logged as warnings and
+// the last failing status is returned; post_deactivate is attempted regardless.
+hailo_status BaseVStream::stop_vstream()
+{
+    hailo_status status = HAILO_SUCCESS;
+    if (m_is_activated) {
+        m_is_activated = false;
+        status = m_entry_element->deactivate();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__WARNING("Failed deactivate of vstream {} status {}", name(), status);
+        }
+
+        status = m_entry_element->post_deactivate();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__WARNING("Failed post deactivate of vstream {} status {}", name(), status);
+        }
+    }
+    return status;
+}
+
+// Stops the vstream and clears its pipeline buffers. Only legal while the
+// owning network group is deactivated; also resets any sticky pipeline error
+// so the vstream can be restarted cleanly. (Fixes log typo "Overwritting".)
+hailo_status BaseVStream::stop_and_clear()
+{
+    // wait(0) is used as an "is signalled?" probe: HAILO_TIMEOUT means the
+    // core-op activation event is NOT set, i.e. the network group is deactivated.
+    auto status = m_core_op_activated_event->wait(std::chrono::milliseconds(0));
+    CHECK(HAILO_TIMEOUT == status, HAILO_INVALID_OPERATION,
+        "Trying to clear {} vstream before its network group is deactivated", name());
+
+    status = stop_vstream();
+    CHECK_SUCCESS(status);
+
+    status = m_entry_element->clear();
+    CHECK_SUCCESS(status, "Failed clearing vstream {}", name());
+
+    const auto curr_pipeline_status = m_pipeline_status->load();
+    if (HAILO_SUCCESS != curr_pipeline_status) {
+        LOGGER__TRACE("Overwriting current pipeline status {}", curr_pipeline_status);
+        m_pipeline_status->store(HAILO_SUCCESS);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Frame size in bytes as seen by the user: NMS-ordered streams are sized by
+// the NMS shape, all others by the regular image shape and user buffer format.
+size_t BaseVStream::get_frame_size() const
+{
+    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_vstream_info.format.order) {
+        return HailoRTCommon::get_nms_host_frame_size(m_vstream_info.nms_shape, m_vstream_params.user_buffer_format);
+    }
+    return HailoRTCommon::get_frame_size(m_vstream_info.shape, m_vstream_params.user_buffer_format);
+}
+
+// --- Simple accessors over the stored info/params/accumulators ---
+
+const hailo_vstream_info_t &BaseVStream::get_info() const
+{
+    return m_vstream_info;
+}
+
+const hailo_format_t &BaseVStream::get_user_buffer_format() const
+{
+    return m_vstream_params.user_buffer_format;
+}
+
+std::string BaseVStream::name() const
+{
+    return std::string(m_vstream_info.name);
+}
+
+std::string BaseVStream::network_name() const
+{
+    return std::string(m_vstream_info.network_name);
+}
+
+// Per-element FPS accumulators, keyed by element name.
+const std::map<std::string, AccumulatorPtr> &BaseVStream::get_fps_accumulators() const
+{
+    return m_fps_accumulators;
+}
+
+// Per-element latency accumulators, keyed by element name.
+const std::map<std::string, AccumulatorPtr> &BaseVStream::get_latency_accumulators() const
+{
+    return m_latency_accumulators;
+}
+
+// Per-element queue-size accumulators (an element may expose several), keyed by element name.
+const std::map<std::string, std::vector<AccumulatorPtr>> &BaseVStream::get_queue_size_accumulators() const
+{
+    return m_queue_size_accumulators;
+}
+
+// Whole-pipeline latency accumulator (may be null when latency measurement is off).
+AccumulatorPtr BaseVStream::get_pipeline_latency_accumulator() const
+{
+    return m_pipeline_latency_accumulator;
+}
+
+
+const std::vector<std::shared_ptr<PipelineElement>> &BaseVStream::get_pipeline() const
+{
+    return m_pipeline;
+}
+
+// Creates the public InputVStream facade over an InputVStreamInternal
+// implementation built from the given pipeline.
+Expected<InputVStream> InputVStream::create(const hailo_vstream_info_t &vstream_info,
+    const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+    std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
+    AccumulatorPtr pipeline_latency_accumulator)
+{
+    auto vstream_internal = InputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
+        std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
+    CHECK_EXPECTED(vstream_internal);
+
+    InputVStream vstream(vstream_internal.release());
+    return vstream;
+}
+
+// Writes one frame into the vstream pipeline (delegates to the implementation).
+// Note: the previous std::move() on a const lvalue reference was a no-op
+// (clang-tidy performance-move-const-arg) - the view is passed through directly.
+hailo_status InputVStream::write(const MemoryView &buffer)
+{
+    return m_vstream->write(buffer);
+}
+
+// Flushes any frames still pending inside the pipeline.
+hailo_status InputVStream::flush()
+{
+    return m_vstream->flush();
+}
+
+// Clears a set of vstreams in two phases: first stop-and-clear ALL of them,
+// then restart ALL of them - so no vstream restarts while a sibling still
+// holds pipeline buffers.
+hailo_status InputVStream::clear(std::vector<InputVStream> &vstreams)
+{
+    for (auto &vstream : vstreams) {
+        auto status = vstream.stop_and_clear();
+        CHECK_SUCCESS(status);
+    }
+    for (auto &vstream : vstreams) {
+        auto status = vstream.start_vstream();
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Same two-phase clear, for callers holding reference wrappers.
+hailo_status InputVStream::clear(std::vector<std::reference_wrapper<InputVStream>> &vstreams)
+{
+    for (auto &vstream : vstreams) {
+        auto status = vstream.get().stop_and_clear();
+        CHECK_SUCCESS(status);
+    }
+    for (auto &vstream : vstreams) {
+        auto status = vstream.get().start_vstream();
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// --- InputVStream: thin delegators to the internal implementation ---
+
+hailo_status InputVStream::abort()
+{
+    return m_vstream->abort();
+}
+
+hailo_status InputVStream::resume()
+{
+    return m_vstream->resume();
+}
+
+size_t InputVStream::get_frame_size() const
+{
+    return m_vstream->get_frame_size();
+}
+
+const hailo_vstream_info_t &InputVStream::get_info() const
+{
+    return m_vstream->get_info();
+}
+
+const hailo_format_t &InputVStream::get_user_buffer_format() const
+{
+    return m_vstream->get_user_buffer_format();
+}
+
+std::string InputVStream::name() const
+{
+    return m_vstream->name();
+}
+
+std::string InputVStream::network_name() const
+{
+    return m_vstream->network_name();
+}
+
+const std::map<std::string, AccumulatorPtr> &InputVStream::get_fps_accumulators() const
+{
+    return m_vstream->get_fps_accumulators();
+}
+
+const std::map<std::string, AccumulatorPtr> &InputVStream::get_latency_accumulators() const
+{
+    return m_vstream->get_latency_accumulators();
+}
+
+const std::map<std::string, std::vector<AccumulatorPtr>> &InputVStream::get_queue_size_accumulators() const
+{
+    return m_vstream->get_queue_size_accumulators();
+}
+
+AccumulatorPtr InputVStream::get_pipeline_latency_accumulator() const
+{
+    return m_vstream->get_pipeline_latency_accumulator();
+}
+
+const std::vector<std::shared_ptr<PipelineElement>> &InputVStream::get_pipeline() const
+{
+    return m_vstream->get_pipeline();
+}
+
+hailo_status InputVStream::start_vstream()
+{
+    return m_vstream->start_vstream();
+}
+
+hailo_status InputVStream::stop_vstream()
+{
+    return m_vstream->stop_vstream();
+}
+
+hailo_status InputVStream::stop_and_clear()
+{
+    return m_vstream->stop_and_clear();
+}
+
+std::string InputVStream::get_pipeline_description() const
+{
+    return m_vstream->get_pipeline_description();
+}
+
+// Fork support hooks (used by the multi-process service - see HAILO_SUPPORT_MULTI_PROCESS).
+hailo_status InputVStream::before_fork()
+{
+    return m_vstream->before_fork();
+}
+
+hailo_status InputVStream::after_fork_in_parent()
+{
+    return m_vstream->after_fork_in_parent();
+}
+
+hailo_status InputVStream::after_fork_in_child()
+{
+    return m_vstream->after_fork_in_child();
+}
+
+// Facade owns (shares) the internal implementation.
+InputVStream::InputVStream(std::shared_ptr<InputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
+
+// Creates the public OutputVStream facade over an OutputVStreamInternal
+// implementation built from the given pipeline.
+Expected<OutputVStream> OutputVStream::create(
+    const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+    EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
+{
+    auto vstream_internal = OutputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry,
+        std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
+    CHECK_EXPECTED(vstream_internal);
+
+    OutputVStream vstream(vstream_internal.release());
+    return vstream;
+}
+
+// Reads one frame from the pipeline into the caller-supplied view.
+hailo_status OutputVStream::read(MemoryView buffer)
+{
+    return m_vstream->read(std::move(buffer));
+}
+
+// Clears a set of vstreams in two phases: first stop-and-clear ALL of them,
+// then restart ALL of them - so no vstream restarts while a sibling still
+// holds pipeline buffers.
+hailo_status OutputVStream::clear(std::vector<OutputVStream> &vstreams)
+{
+    for (auto &vstream : vstreams) {
+        auto status = vstream.stop_and_clear();
+        CHECK_SUCCESS(status);
+    }
+    for (auto &vstream : vstreams) {
+        auto status = vstream.start_vstream();
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVStream::abort()
+{
+    return m_vstream->abort();
+}
+
+hailo_status OutputVStream::resume()
+{
+    return m_vstream->resume();
+}
+
+// Same two-phase clear, for callers holding reference wrappers.
+hailo_status OutputVStream::clear(std::vector<std::reference_wrapper<OutputVStream>> &vstreams)
+{
+    for (auto &vstream : vstreams) {
+        auto status = vstream.get().stop_and_clear();
+        CHECK_SUCCESS(status);
+    }
+    for (auto &vstream : vstreams) {
+        auto status = vstream.get().start_vstream();
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+size_t OutputVStream::get_frame_size() const
+{
+ return m_vstream->get_frame_size();
+}
+
+const hailo_vstream_info_t &OutputVStream::get_info() const
+{
+ return m_vstream->get_info();
+}
+
+const hailo_format_t &OutputVStream::get_user_buffer_format() const
+{
+ return m_vstream->get_user_buffer_format();
+}
+
+std::string OutputVStream::name() const
+{
+ return m_vstream->name();
+}
+
+std::string OutputVStream::network_name() const
+{
+ return m_vstream->network_name();
+}
+
+const std::map<std::string, AccumulatorPtr> &OutputVStream::get_fps_accumulators() const
+{
+ return m_vstream->get_fps_accumulators();
+}
+
+const std::map<std::string, AccumulatorPtr> &OutputVStream::get_latency_accumulators() const
+{
+ return m_vstream->get_latency_accumulators();
+}
+
+const std::map<std::string, std::vector<AccumulatorPtr>> &OutputVStream::get_queue_size_accumulators() const
+{
+ return m_vstream->get_queue_size_accumulators();
+}
+
+AccumulatorPtr OutputVStream::get_pipeline_latency_accumulator() const
+{
+ return m_vstream->get_pipeline_latency_accumulator();
+}
+
+const std::vector<std::shared_ptr<PipelineElement>> &OutputVStream::get_pipeline() const
+{
+ return m_vstream->get_pipeline();
+}
+
+hailo_status OutputVStream::start_vstream()
+{
+ return m_vstream->start_vstream();
+}
+
+hailo_status OutputVStream::stop_vstream()
+{
+ return m_vstream->stop_vstream();
+}
+
+hailo_status OutputVStream::stop_and_clear()
+{
+ return m_vstream->stop_and_clear();
+}
+
+std::string OutputVStream::get_pipeline_description() const
+{
+ return m_vstream->get_pipeline_description();
+}
+
+hailo_status OutputVStream::before_fork()
+{
+ return m_vstream->before_fork();
+}
+
+hailo_status OutputVStream::after_fork_in_parent()
+{
+ return m_vstream->after_fork_in_parent();
+}
+
+hailo_status OutputVStream::after_fork_in_child()
+{
+ return m_vstream->after_fork_in_child();
+}
+
+OutputVStream::OutputVStream(std::shared_ptr<OutputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
+
+// Collects the requested accumulator kind (FPS or latency) from every non-null
+// pipeline element, keyed by the element's name. Elements without the requested
+// accumulator, and unknown accumulator types, are skipped.
+std::map<std::string, AccumulatorPtr> get_pipeline_accumulators_by_type(
+    const std::vector<std::shared_ptr<PipelineElement>> &pipeline, AccumulatorType accumulator_type)
+{
+    std::map<std::string, AccumulatorPtr> accumulators_by_elem_name;
+    for (const auto &element : pipeline) {
+        if (nullptr == element) {
+            continue;
+        }
+
+        AccumulatorPtr elem_accumulator = nullptr;
+        switch (accumulator_type) {
+        case AccumulatorType::FPS:
+            elem_accumulator = element->get_fps_accumulator();
+            break;
+        case AccumulatorType::LATENCY:
+            elem_accumulator = element->get_latency_accumulator();
+            break;
+        default:
+            continue; // Unknown accumulator type - nothing to collect for this element
+        }
+
+        if (nullptr != elem_accumulator) {
+            accumulators_by_elem_name.emplace(element->name(), elem_accumulator);
+        }
+    }
+
+    return accumulators_by_elem_name;
+}
+
+// Maps each non-null pipeline element that exposes queue-size accumulators to
+// its accumulator list, keyed by the element's name. Elements with no
+// queue-size accumulators are omitted from the result.
+std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_size_accumulators(
+    const std::vector<std::shared_ptr<PipelineElement>> &pipeline)
+{
+    std::map<std::string, std::vector<AccumulatorPtr>> accumulators_by_elem_name;
+    for (const auto &element : pipeline) {
+        if (nullptr == element) {
+            continue;
+        }
+        auto elem_accumulators = element->get_queue_size_accumulators();
+        if (!elem_accumulators.empty()) {
+            accumulators_by_elem_name.emplace(element->name(), std::move(elem_accumulators));
+        }
+    }
+
+    return accumulators_by_elem_name;
+}
+
+// Factory: builds the concrete InputVStreamImpl and returns it upcast to the
+// internal interface type.
+Expected<std::shared_ptr<InputVStreamInternal>> InputVStreamInternal::create(const hailo_vstream_info_t &vstream_info,
+    const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+    std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
+    AccumulatorPtr pipeline_latency_accumulator)
+{
+    auto vstream = InputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
+        std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
+    CHECK_EXPECTED(vstream);
+    auto vstream_ptr = std::shared_ptr<InputVStreamInternal>(vstream.release());
+    return vstream_ptr;
+}
+
+// Forwards everything to BaseVStream; `output_status` reports construction success.
+InputVStreamInternal::InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&core_op_activated_event,
+    hailo_status &output_status) :
+    BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+        shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status){}
+
+// Factory for the concrete input vstream. When latency measurement is enabled,
+// hooks the pipeline exit's sink so that each completed push records the
+// elapsed time since the buffer entered the pipeline.
+// NOTE(review): assumes pipeline_exit is non-null whenever
+// pipeline_latency_accumulator is set - confirm with callers.
+Expected<std::shared_ptr<InputVStreamImpl>> InputVStreamImpl::create(const hailo_vstream_info_t &vstream_info,
+    const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+    std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
+    AccumulatorPtr pipeline_latency_accumulator)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    if (nullptr != pipeline_latency_accumulator) {
+        pipeline_exit->sink().set_push_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
+            const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
+                std::chrono::steady_clock::now() - metadata.get_start_time()).count();
+            pipeline_latency_accumulator->add_data_point(duration_sec);
+        });
+    }
+
+    auto vstream_ptr = std::shared_ptr<InputVStreamImpl>(new InputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
+        std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), status));
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
+
+    return vstream_ptr;
+}
+
+// Delegates construction to InputVStreamInternal; logs only if the base
+// initialized successfully (output_status carries the result to the factory).
+InputVStreamImpl::InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+    EventPtr core_op_activated_event, hailo_status &output_status) :
+    InputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+        shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status)
+{
+    if (HAILO_SUCCESS != output_status) {
+        return;
+    }
+    LOGGER__INFO("Creating {}...", name());
+}
+
+// Stops the pipeline; if the vstream was user-aborted, re-asserts the abort so
+// the low-level stream stays aborted through its own destruction.
+InputVStreamImpl::~InputVStreamImpl()
+{
+    (void)stop_vstream();
+    if (m_is_aborted) {
+        // If VStream was aborted, do not clear low-level stream abortion,
+        // otherwise flush would be called on low-level stream d-tor when there is no receiver.
+        (void)abort();
+    }
+}
+
+// Pushes one user frame into the pipeline. Fails fast if the vstream is not
+// activated, or if the owning network group has not been activated yet
+// (non-blocking wait on the activation event).
+hailo_status InputVStreamImpl::write(const MemoryView &buffer)
+{
+    if (nullptr != m_core_op_activated_event) {
+        CHECK(m_is_activated, HAILO_VSTREAM_PIPELINE_NOT_ACTIVATED, "Failed to write buffer! Virtual stream {} is not activated!", name());
+        auto status = m_core_op_activated_event->wait(std::chrono::milliseconds(0));
+        CHECK(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED,
+            "Trying to write to vstream {} before its network group is activated", name());
+    }
+
+    auto status = m_entry_element->run_push(PipelineBuffer(buffer, m_measure_pipeline_latency));
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+        // Shutdown is not an error by itself - surface the pipeline's own status.
+        LOGGER__INFO("Sending to VStream was shutdown!");
+        status = m_pipeline_status->load();
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Sending to VStream was aborted!");
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    return status;
+}
+
+// Flushes the pipeline: pushes a FLUSH marker through the entry element, then
+// asks the entry element to flush downstream.
+hailo_status InputVStreamImpl::flush()
+{
+    auto status = m_entry_element->run_push(PipelineBuffer(PipelineBuffer::Type::FLUSH));
+    CHECK_SUCCESS(status);
+
+    status = m_entry_element->flush();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+// Multi-process mode: connects to the HailoRT service over gRPC and caches the
+// vstream's buffer format and info locally (they are immutable per handle).
+Expected<std::shared_ptr<InputVStreamClient>> InputVStreamClient::create(uint32_t input_vstream_handle)
+{
+    grpc::ChannelArguments ch_args;
+    ch_args.SetMaxReceiveMessageSize(-1); // -1 = unlimited message size (frames may be large)
+    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
+
+    auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+    CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto user_buffer_format = client->InputVStream_get_user_buffer_format(input_vstream_handle);
+    CHECK_EXPECTED(user_buffer_format);
+
+    auto vstream_info = client->InputVStream_get_info(input_vstream_handle);
+    CHECK_EXPECTED(vstream_info);
+
+    return std::shared_ptr<InputVStreamClient>(new InputVStreamClient(std::move(client), std::move(input_vstream_handle),
+        user_buffer_format.release(), vstream_info.release()));
+}
+
+InputVStreamClient::InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format,
+    hailo_vstream_info_t &&info)
+    : m_client(std::move(client)), m_handle(std::move(input_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
+
+// Releases the service-side vstream handle; failure is logged but not fatal.
+InputVStreamClient::~InputVStreamClient()
+{
+    auto reply = m_client->InputVStream_release(m_handle);
+    if (reply != HAILO_SUCCESS) {
+        LOGGER__CRITICAL("InputVStream_release failed!");
+    }
+}
+
+// RPC forward: write one frame through the service.
+hailo_status InputVStreamClient::write(const MemoryView &buffer)
+{
+    return m_client->InputVStream_write(m_handle, buffer);
+}
+
+// RPC forward: flush the service-side vstream.
+hailo_status InputVStreamClient::flush()
+{
+    return m_client->InputVStream_flush(m_handle);
+}
+
+// Aborts via a freshly created client rather than m_client.
+// NOTE(review): presumably so abort can run while m_client is busy with a
+// blocked write - confirm against the service's threading model.
+hailo_status InputVStreamClient::abort()
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    auto abort_client = expected_client.release();
+    return abort_client->InputVStream_abort(m_handle);
+}
+
+// RPC forward: resume a previously aborted vstream.
+hailo_status InputVStreamClient::resume()
+{
+    return m_client->InputVStream_resume(m_handle);
+}
+
+// RPC forward: frame size in bytes; returns 0 on RPC failure (logged).
+size_t InputVStreamClient::get_frame_size() const
+{
+    auto frame_size = m_client->InputVStream_get_frame_size(m_handle);
+    if (!frame_size) {
+        LOGGER__CRITICAL("InputVStream_get_frame_size failed with status={}", frame_size.status());
+        return 0;
+    }
+    return frame_size.release();
+}
+
+// Returns the info cached at creation time (no RPC).
+const hailo_vstream_info_t &InputVStreamClient::get_info() const
+{
+    return m_info;
+}
+
+// Returns the user buffer format cached at creation time (no RPC).
+const hailo_format_t &InputVStreamClient::get_user_buffer_format() const
+{
+    return m_user_buffer_format;
+}
+
+// RPC forward: vstream name; returns an empty string on failure (logged).
+std::string InputVStreamClient::name() const
+{
+    auto expected_name = m_client->InputVStream_name(m_handle);
+    if (!expected_name) {
+        LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
+        return "";
+    }
+    return expected_name.release();
+}
+
+// RPC forward: name of the network this vstream belongs to; returns an empty
+// string on failure (logged).
+std::string InputVStreamClient::network_name() const
+{
+    auto expected_name = m_client->InputVStream_network_name(m_handle);
+    if (!expected_name) {
+        // Fixed: the log previously said "InputVStream_name" (copy-paste from name()).
+        LOGGER__CRITICAL("InputVStream_network_name failed with status={}", expected_name.status());
+        return "";
+    }
+    return expected_name.release();
+}
+
+// Profiling accessors are not available over the multi-process service; each
+// logs an error and returns an empty placeholder member.
+const std::map<std::string, AccumulatorPtr> &InputVStreamClient::get_fps_accumulators() const
+{
+    LOGGER__ERROR("InputVStream::get_fps_accumulators function is not supported when using multi-process service");
+    return m_fps_accumulators;
+}
+const std::map<std::string, AccumulatorPtr> &InputVStreamClient::get_latency_accumulators() const
+{
+    LOGGER__ERROR("InputVStream::get_latency_accumulators function is not supported when using multi-process service");
+    return m_latency_accumulators;
+}
+
+const std::map<std::string, std::vector<AccumulatorPtr>> &InputVStreamClient::get_queue_size_accumulators() const
+{
+    LOGGER__ERROR("InputVStream::get_queue_size_accumulators function is not supported when using multi-process service");
+    return m_queue_size_accumulators;
+}
+AccumulatorPtr InputVStreamClient::get_pipeline_latency_accumulator() const
+{
+    LOGGER__ERROR("InputVStream::get_pipeline_latency_accumulator function is not supported when using multi-process service");
+    return m_pipeline_latency_accumulator;
+}
+const std::vector<std::shared_ptr<PipelineElement>> &InputVStreamClient::get_pipeline() const
+{
+    LOGGER__ERROR("InputVStream::get_pipeline function is not supported when using multi-process service");
+    return m_pipeline;
+}
+
+// (Re)creates the gRPC client; used after fork.
+hailo_status InputVStreamClient::create_client()
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    m_client = expected_client.release();
+    return HAILO_SUCCESS;
+}
+
+// gRPC channels do not survive fork(): drop the client before forking...
+hailo_status InputVStreamClient::before_fork()
+{
+    m_client.reset();
+    return HAILO_SUCCESS;
+}
+
+// ...and rebuild it afterwards in the parent...
+hailo_status InputVStreamClient::after_fork_in_parent()
+{
+    return create_client();
+}
+
+// ...and in the child, which additionally duplicates the service-side handle
+// under its own PID so both processes hold independent handles.
+hailo_status InputVStreamClient::after_fork_in_child()
+{
+    auto status = create_client();
+    CHECK_SUCCESS(status);
+    auto expected_dup_handle = m_client->InputVStream_dup_handle(OsUtils::get_curr_pid(), m_handle);
+    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
+    m_handle = expected_dup_handle.value();
+    return HAILO_SUCCESS;
+}
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+// Formats the pipeline as "Input pipeline '<name>': <elem> >> ... >> HW".
+std::string InputVStreamInternal::get_pipeline_description() const
+{
+    std::stringstream pipeline_str;
+    pipeline_str << "Input pipeline '" << name() << "': ";
+    for (const auto &element : m_pipeline) {
+        pipeline_str << element->description() << " >> ";
+    }
+    pipeline_str << "HW";
+    return pipeline_str.str();
+}
+
+// Factory: builds the concrete OutputVStreamImpl and returns it upcast to the
+// internal interface type.
+Expected<std::shared_ptr<OutputVStreamInternal>> OutputVStreamInternal::create(
+    const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+    EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
+{
+    auto vstream = OutputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry,
+        std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
+    CHECK_EXPECTED(vstream);
+    auto vstream_ptr = std::shared_ptr<OutputVStreamInternal>(vstream.release());
+    return vstream_ptr;
+}
+
+// Forwards everything to BaseVStream; `output_status` reports construction success.
+OutputVStreamInternal::OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry,
+    std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+    AccumulatorPtr pipeline_latency_accumulator,
+    EventPtr core_op_activated_event, hailo_status &output_status) :
+    BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+        shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status){}
+
+// Factory for the concrete output vstream. Requires the entry element to have
+// exactly one source pad; when latency measurement is enabled, hooks that pad
+// so each completed pull records the elapsed time since the buffer entered the
+// pipeline.
+Expected<std::shared_ptr<OutputVStreamImpl>> OutputVStreamImpl::create(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+    EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    CHECK_AS_EXPECTED(1 == pipeline_entry->sources().size(), HAILO_INVALID_ARGUMENT,
+        "OutputVStream's entry element is expected to have one source");
+
+    if (nullptr != pipeline_latency_accumulator) {
+        pipeline_entry->sources()[0].set_pull_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
+            const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
+                std::chrono::steady_clock::now() - metadata.get_start_time()).count();
+            pipeline_latency_accumulator->add_data_point(duration_sec);
+        });
+    }
+
+    auto vstream_ptr = std::shared_ptr<OutputVStreamImpl>(new OutputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
+        std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), status));
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
+
+    return vstream_ptr;
+}
+
+// Formats the pipeline as "Output pipeline '<name>': HW >> <elem> >> ...".
+std::string OutputVStreamInternal::get_pipeline_description() const
+{
+    std::stringstream pipeline_str;
+    pipeline_str << "Output pipeline '" << name() << "': HW";
+    for (const auto &element : m_pipeline) {
+        pipeline_str << " >> " << element->description();
+    }
+    return pipeline_str.str();
+}
+
+// Delegates construction to OutputVStreamInternal; on success, wires every
+// pipeline element's can/can't-pull notifications to the user-settable
+// read-availability callbacks. Lambdas capture `this`, so they must not
+// outlive this object (elements are owned by the vstream).
+OutputVStreamImpl::OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    std::shared_ptr<PipelineElement> pipeline_entry,
+    std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+    AccumulatorPtr pipeline_latency_accumulator,
+    EventPtr core_op_activated_event, hailo_status &output_status) :
+    OutputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+        shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status)
+{
+    if (HAILO_SUCCESS != output_status) {
+        return;
+    }
+
+    for (auto &element : m_pipeline) {
+        element->set_on_cant_pull_callback([this] () {
+            if (m_cant_read_callback) {
+                m_cant_read_callback();
+            }
+        });
+        element->set_on_can_pull_callback([this] () {
+            if (m_can_read_callback) {
+                m_can_read_callback();
+            }
+        });
+    }
+
+    LOGGER__INFO("Creating {}...", name());
+}
+
+// Stops the pipeline; if the vstream was user-aborted, re-asserts the abort so
+// the low-level stream stays aborted through its own destruction.
+OutputVStreamImpl::~OutputVStreamImpl()
+{
+    (void)stop_vstream();
+    if (m_is_aborted) {
+        // If VStream was aborted, do not clear low-level stream abortion,
+        // otherwise flush would be called on low-level stream d-tor when there is no receiver.
+        (void)abort();
+    }
+}
+
+// Pulls one processed frame from the pipeline into `buffer`. Fails fast if the
+// vstream is not activated, or if the owning network group has not been
+// activated yet (non-blocking wait on the activation event).
+hailo_status OutputVStreamImpl::read(MemoryView buffer)
+{
+    if (nullptr != m_core_op_activated_event) {
+        CHECK(m_is_activated, HAILO_VSTREAM_PIPELINE_NOT_ACTIVATED, "read() failed! Virtual stream {} is not activated!", name());
+        auto status = m_core_op_activated_event->wait(std::chrono::milliseconds(0));
+        if (HAILO_TIMEOUT == status) {
+            LOGGER__INFO("Trying to read from vstream {} before its network_group is activated", name());
+            return HAILO_NETWORK_GROUP_NOT_ACTIVATED;
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    assert(1 == m_entry_element->sources().size());
+    auto recv_buffer = m_entry_element->sources()[0].run_pull(PipelineBuffer(buffer, m_measure_pipeline_latency));
+    auto status = recv_buffer.status();
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+        // Shutdown is not an error by itself - surface the pipeline's own status.
+        LOGGER__INFO("Receiving to VStream was shutdown!");
+        status = m_pipeline_status->load();
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Receiving to VStream was aborted!");
+        // Let in-flight pipeline work drain before returning to the caller.
+        m_entry_element->wait_for_finish();
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    return status;
+}
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+// Multi-process mode: connects to the HailoRT service over gRPC and caches the
+// vstream's buffer format and info locally (they are immutable per handle).
+Expected<std::shared_ptr<OutputVStreamClient>> OutputVStreamClient::create(uint32_t outputs_vstream_handle)
+{
+    grpc::ChannelArguments ch_args;
+    ch_args.SetMaxReceiveMessageSize(-1); // -1 = unlimited message size (frames may be large)
+    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
+
+    auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+    CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto user_buffer_format = client->OutputVStream_get_user_buffer_format(outputs_vstream_handle);
+    CHECK_EXPECTED(user_buffer_format);
+
+    auto info = client->OutputVStream_get_info(outputs_vstream_handle);
+    CHECK_EXPECTED(info);
+
+    return std::shared_ptr<OutputVStreamClient>(new OutputVStreamClient(std::move(client), std::move(outputs_vstream_handle),
+        user_buffer_format.release(), info.release()));
+}
+
+OutputVStreamClient::OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
+    hailo_vstream_info_t &&info)
+    : m_client(std::move(client)), m_handle(std::move(outputs_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
+
+// Releases the service-side vstream handle; failure is logged but not fatal.
+OutputVStreamClient::~OutputVStreamClient()
+{
+    auto reply = m_client->OutputVStream_release(m_handle);
+    if (reply != HAILO_SUCCESS) {
+        LOGGER__CRITICAL("OutputVStream_release failed!");
+    }
+}
+
+// RPC forward: read one frame through the service.
+hailo_status OutputVStreamClient::read(MemoryView buffer)
+{
+    return m_client->OutputVStream_read(m_handle, buffer);
+}
+
+// Aborts via a freshly created client rather than m_client.
+// NOTE(review): presumably so abort can run while m_client is busy with a
+// blocked read - confirm against the service's threading model.
+hailo_status OutputVStreamClient::abort()
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    auto abort_client = expected_client.release();
+    return abort_client->OutputVStream_abort(m_handle);
+}
+
+// RPC forward: resume a previously aborted vstream.
+hailo_status OutputVStreamClient::resume()
+{
+    return m_client->OutputVStream_resume(m_handle);
+}
+
+// RPC forward: frame size in bytes; returns 0 on RPC failure (logged).
+size_t OutputVStreamClient::get_frame_size() const
+{
+    auto frame_size = m_client->OutputVStream_get_frame_size(m_handle);
+    if (!frame_size) {
+        LOGGER__CRITICAL("OutputVStream_get_frame_size failed with status={}", frame_size.status());
+        return 0;
+    }
+    return frame_size.release();
+}
+
+// Returns the info cached at creation time (no RPC).
+const hailo_vstream_info_t &OutputVStreamClient::get_info() const
+{
+    return m_info;
+}
+
+// Returns the user buffer format cached at creation time (no RPC).
+const hailo_format_t &OutputVStreamClient::get_user_buffer_format() const
+{
+    return m_user_buffer_format;
+}
+
+// RPC forward: vstream name; returns an empty string on failure (logged).
+std::string OutputVStreamClient::name() const
+{
+    auto expected_name = m_client->OutputVStream_name(m_handle);
+    if (!expected_name) {
+        // Fixed: the log previously said "InputVStream_name" (copy-paste from the input client).
+        LOGGER__CRITICAL("OutputVStream_name failed with status={}", expected_name.status());
+        return "";
+    }
+    return expected_name.release();
+}
+
+// RPC forward: name of the network this vstream belongs to; returns an empty
+// string on failure (logged).
+std::string OutputVStreamClient::network_name() const
+{
+    auto expected_name = m_client->OutputVStream_network_name(m_handle);
+    if (!expected_name) {
+        // Fixed: the log previously said "InputVStream_name" (copy-paste from the input client).
+        LOGGER__CRITICAL("OutputVStream_network_name failed with status={}", expected_name.status());
+        return "";
+    }
+    return expected_name.release();
+}
+
+// Profiling accessors are not available over the multi-process service; each
+// logs an error and returns an empty placeholder member.
+const std::map<std::string, AccumulatorPtr> &OutputVStreamClient::get_fps_accumulators() const
+{
+    LOGGER__ERROR("OutputVStream::get_fps_accumulators function is not supported when using multi-process service");
+    return m_fps_accumulators;
+}
+const std::map<std::string, AccumulatorPtr> &OutputVStreamClient::get_latency_accumulators() const
+{
+    // Fixed typo in the log message ("functoin" -> "function").
+    LOGGER__ERROR("OutputVStream::get_latency_accumulators function is not supported when using multi-process service");
+    return m_latency_accumulators;
+}
+
+const std::map<std::string, std::vector<AccumulatorPtr>> &OutputVStreamClient::get_queue_size_accumulators() const
+{
+    LOGGER__ERROR("OutputVStream::get_queue_size_accumulators function is not supported when using multi-process service");
+    return m_queue_size_accumulators;
+}
+AccumulatorPtr OutputVStreamClient::get_pipeline_latency_accumulator() const
+{
+    LOGGER__ERROR("OutputVStream::get_pipeline_latency_accumulator function is not supported when using multi-process service");
+    return m_pipeline_latency_accumulator;
+}
+const std::vector<std::shared_ptr<PipelineElement>> &OutputVStreamClient::get_pipeline() const
+{
+    LOGGER__ERROR("OutputVStream::get_pipeline function is not supported when using multi-process service");
+    return m_pipeline;
+}
+
+// (Re)creates the gRPC client; used after fork.
+hailo_status OutputVStreamClient::create_client()
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    m_client = expected_client.release();
+    return HAILO_SUCCESS;
+}
+
+// gRPC channels do not survive fork(): drop the client before forking...
+hailo_status OutputVStreamClient::before_fork()
+{
+    m_client.reset();
+    return HAILO_SUCCESS;
+}
+
+// ...and rebuild it afterwards in the parent...
+hailo_status OutputVStreamClient::after_fork_in_parent()
+{
+    return create_client();
+}
+
+// ...and in the child, which additionally duplicates the service-side handle
+// under its own PID so both processes hold independent handles.
+hailo_status OutputVStreamClient::after_fork_in_child()
+{
+    auto status = create_client();
+    CHECK_SUCCESS(status);
+    auto expected_dup_handle = m_client->OutputVStream_dup_handle(OsUtils::get_curr_pid(), m_handle);
+    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
+    m_handle = expected_dup_handle.value();
+    return HAILO_SUCCESS;
+}
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+// Factory for the element that reads raw frames from a HW output stream.
+// Allocates a buffer pool sized to the stream's frame size, and - when an
+// output transform context is supplied - a second pool sized to the
+// transformed frame size.
+Expected<std::shared_ptr<HwReadElement>> HwReadElement::create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
+    size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> transform_context)
+{
+    auto buffer_pool = BufferPool::create(stream->get_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
+
+    BufferPoolPtr transform_pool = nullptr;
+    if (transform_context) {
+        auto expected_transform_pool = BufferPool::create(transform_context->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+        CHECK_EXPECTED(expected_transform_pool, "Failed creating BufferPool for {}", name);
+        transform_pool = expected_transform_pool.release();
+    }
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto hw_read_elem_ptr = make_shared_nothrow<HwReadElement>(stream, buffer_pool.release(), name, timeout,
+        duration_collector.release(), shutdown_event, std::move(pipeline_status), transform_pool, std::move(transform_context));
+    CHECK_AS_EXPECTED(nullptr != hw_read_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", hw_read_elem_ptr->name());
+
+    return hw_read_elem_ptr;
+}
+
+// Member-wise initialization; m_activation_wait_or_shutdown combines the
+// stream's core-op activation event with the shutdown event so run_pull can
+// wait on both at once.
+HwReadElement::HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name,
+    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    BufferPoolPtr transform_pool, std::unique_ptr<OutputTransformContext> transform_context) :
+    SourceElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_stream(stream),
+    m_pool(buffer_pool),
+    m_transform_pool(transform_pool),
+    m_timeout(timeout),
+    m_shutdown_event(shutdown_event),
+    m_activation_wait_or_shutdown(stream->get_core_op_activated_event(), shutdown_event),
+    m_transform_context(std::move(transform_context))
+{}
+
+// Number of frames the stream flagged as invalid (see run_pull's HAILO_INVALID_FRAME handling).
+uint32_t HwReadElement::get_invalid_frames_count()
+{
+    return m_stream->get_invalid_frames_count();
+}
+
+// "(name | hw_frame_size: N)" - used when printing the pipeline description.
+std::string HwReadElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";
+
+    return element_description.str();
+}
+
+// After deactivation: clear the stream's abort flag; a not-activated stream is
+// acceptable here and treated as success.
+hailo_status HwReadElement::execute_post_deactivate()
+{
+    auto status = m_stream->clear_abort();
+    CHECK(((HAILO_SUCCESS == status) || (HAILO_STREAM_NOT_ACTIVATED == status)), status,
+        "Failed to clear abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+// Nothing to clear for a HW-read element.
+hailo_status HwReadElement::execute_clear()
+{
+    return HAILO_SUCCESS;
+}
+
+// Flush has no meaning for the device-to-host direction.
+hailo_status HwReadElement::execute_flush()
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+// Abort the underlying stream; not-activated is acceptable.
+hailo_status HwReadElement::execute_abort()
+{
+    auto status = m_stream->abort();
+    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+        "Failed to execute abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+// Resume = clear the abort flag; not-activated is acceptable.
+hailo_status HwReadElement::execute_resume()
+{
+    auto status = m_stream->clear_abort();
+    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+        "Failed to execute resume stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+// No outstanding async work to wait for.
+hailo_status HwReadElement::execute_wait_for_finish()
+{
+    return HAILO_SUCCESS;
+}
+
+// Exposes the pool's queue-size accumulator, if stats collection created one.
+std::vector<AccumulatorPtr> HwReadElement::get_queue_size_accumulators()
+{
+    if (nullptr == m_pool->get_queue_size_accumulator()) {
+        return std::vector<AccumulatorPtr>();
+    }
+    return {m_pool->get_queue_size_accumulator()};
+}
+
+// HwReadElement is a source - pushing into it is invalid.
+hailo_status HwReadElement::run_push(PipelineBuffer &&/*buffer*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+// Pulls one frame from the HW output stream into a pooled buffer. Waits for
+// core-op activation (or shutdown/timeout) when the stream is not scheduled,
+// reads, and retries while the stream reports not-activated. Optionally runs
+// an output transformation into a second pooled buffer before returning.
+Expected<PipelineBuffer> HwReadElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
+{
+    auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        return make_unexpected(buffer.status());
+    }
+    CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
+
+    while (true) {
+        if (!m_stream->is_scheduled()) {
+            // Non-scheduled streams must wait for activation before reading.
+            auto status = m_activation_wait_or_shutdown.wait(m_timeout);
+            if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+                return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+            }
+            if (HAILO_TIMEOUT == status) {
+                return make_unexpected(HAILO_NETWORK_GROUP_NOT_ACTIVATED);
+            }
+            CHECK_SUCCESS_AS_EXPECTED(status);
+        } else {
+            // Scheduled streams only poll for a pending shutdown.
+            auto status = m_activation_wait_or_shutdown.wait(std::chrono::milliseconds(0));
+            if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+                return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+            }
+        }
+
+        MemoryView buffer_view(buffer.value().as_view());
+        m_duration_collector.start_measurement();
+        auto status = m_stream->read(buffer_view);
+        if (HAILO_INVALID_FRAME == status) {
+            // Count the bad frame but keep the pipeline running.
+            m_stream->increase_invalid_frames_count(1);
+            status = HAILO_SUCCESS;
+        }
+        if (HAILO_STREAM_NOT_ACTIVATED == status) {
+            // Try again
+            continue;
+        }
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("Reading from stream was aborted!");
+            return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
+        }
+        CHECK_SUCCESS_AS_EXPECTED(status, "{} (D2H) failed with status={}", name(), status);
+        m_duration_collector.complete_measurement();
+
+        // TODO: This is for rare cases where a transformation is needed before another pipeline element
+        // Should be handled by the computational graph, and not here.
+        if (m_transform_context) {
+            auto transform_buffer = m_transform_pool->get_available_buffer(PipelineBuffer(), m_timeout);
+            // Fixed: previously re-validated `buffer` here, so a failed transform-pool
+            // acquisition (e.g. timeout) would go on to dereference an invalid Expected.
+            CHECK_EXPECTED(transform_buffer);
+            status = m_transform_context->transform(buffer_view, transform_buffer.value().as_view());
+            CHECK_SUCCESS_AS_EXPECTED(status);
+            return transform_buffer.release();
+        }
+
+        return buffer.release();
+    }
+}
+
+// Activation requires no element-local work.
+hailo_status HwReadElement::execute_activate()
+{
+    return HAILO_SUCCESS;
+}
+
+// Deactivation: signal shutdown (wakes any blocked run_pull), then abort the
+// stream. An abort failure (other than not-activated) takes precedence in the
+// returned status.
+hailo_status HwReadElement::execute_deactivate()
+{
+    auto signal_shutdown_status = m_shutdown_event->signal();
+    if (HAILO_SUCCESS != signal_shutdown_status) {
+        LOGGER__ERROR("Signaling {} shutdown event failed with {}", name(), signal_shutdown_status);
+    }
+
+    auto abort_status = m_stream->abort();
+    if ((HAILO_SUCCESS != abort_status) && (HAILO_STREAM_NOT_ACTIVATED != abort_status)) {
+        LOGGER__ERROR("Abort {} failed with {}", name(), abort_status);
+        return abort_status;
+    }
+
+    return signal_shutdown_status;
+}
+
+Expected<std::shared_ptr<HwWriteElement>> HwWriteElement::create(std::shared_ptr<InputStream> stream, const std::string &name,
+    hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // Build the duration collector first so a failure short-circuits creation.
+    auto duration_collector_exp = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector_exp);
+
+    // Event used to synchronize execute_flush() with run_push() consuming a FLUSH buffer.
+    auto flush_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(nullptr != flush_event, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto elem_ptr = make_shared_nothrow<HwWriteElement>(stream, name,
+        duration_collector_exp.release(), std::move(pipeline_status), flush_event);
+    CHECK_AS_EXPECTED(nullptr != elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", elem_ptr->name());
+
+    return elem_ptr;
+}
+
+// Sink element that writes pipeline buffers to a hardware input stream (H2D).
+// m_got_flush_event is signalled by run_push() when a FLUSH buffer is consumed
+// and waited on by execute_flush().
+HwWriteElement::HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event) :
+    SinkElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    m_stream(stream), m_got_flush_event(got_flush_event)
+{}
+
+Expected<PipelineBuffer> HwWriteElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
+{
+    // HwWriteElement is a sink: data flows into it via run_push(), so pulling is invalid.
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+hailo_status HwWriteElement::run_push(PipelineBuffer &&buffer)
+{
+    // A FLUSH marker carries no data: flush the underlying stream and signal
+    // whoever is blocked in execute_flush(). Flush errors are logged but do not
+    // fail the push.
+    if (PipelineBuffer::Type::FLUSH == buffer.get_type()) {
+        const auto flush_status = m_stream->flush();
+        if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
+            LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
+        } else if (HAILO_SUCCESS != flush_status) {
+            LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
+        }
+        const auto signal_status = m_got_flush_event->signal();
+        CHECK_SUCCESS(signal_status);
+        return HAILO_SUCCESS;
+    }
+
+    // Regular buffer: time the actual H2D write only.
+    m_duration_collector.start_measurement();
+    const auto write_status = m_stream->write(MemoryView(buffer.data(), buffer.size()));
+    m_duration_collector.complete_measurement();
+
+    if (HAILO_STREAM_ABORTED_BY_USER == write_status) {
+        LOGGER__INFO("Failed to send on input stream {} because stream was aborted", m_stream->to_string());
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_SUCCESS(write_status, "{} (H2D) failed with status={}", name(), write_status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_activate()
+{
+    // No element-local state to set up on activation, so this is a no-op.
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_deactivate()
+{
+    // The flush operation will block until all buffers currently in the pipeline will be processed.
+    // We assume that no buffers are sent after the call for deactivate.
+    const auto flush_status = m_stream->flush();
+    if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
+        LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
+        // TODO: HRT-3621
+        return HAILO_SUCCESS;
+    }
+    if (HAILO_SUCCESS != flush_status) {
+        LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
+    }
+
+    // Aborting a stream that is not activated is not an error here.
+    const auto abort_status = m_stream->abort();
+    CHECK(((HAILO_SUCCESS == abort_status) || (HAILO_STREAM_NOT_ACTIVATED == abort_status)), abort_status,
+        "Failed to abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_post_deactivate()
+{
+    // Deactivation aborts the stream; lift the abort so the stream is usable again.
+    const auto clear_status = m_stream->clear_abort();
+    CHECK(((HAILO_SUCCESS == clear_status) || (HAILO_STREAM_NOT_ACTIVATED == clear_status)), clear_status,
+        "Failed to clear abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_clear()
+{
+    // This element owns no queued buffers, so there is nothing to clear.
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_flush()
+{
+    // Block until run_push() observes the FLUSH marker and signals the event,
+    // then re-arm the event for the next flush cycle.
+    const auto wait_status = m_got_flush_event->wait(m_stream->get_timeout());
+    CHECK_SUCCESS(wait_status);
+
+    const auto reset_status = m_got_flush_event->reset();
+    CHECK_SUCCESS(reset_status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_abort()
+{
+    // Aborting a non-activated stream is treated as success.
+    const auto abort_status = m_stream->abort();
+    CHECK(((HAILO_SUCCESS == abort_status) || (HAILO_STREAM_NOT_ACTIVATED == abort_status)), abort_status,
+        "Failed to execute abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_resume()
+{
+    // Resume simply lifts a previous abort; non-activated streams are fine too.
+    const auto clear_status = m_stream->clear_abort();
+    CHECK(((HAILO_SUCCESS == clear_status) || (HAILO_STREAM_NOT_ACTIVATED == clear_status)), clear_status,
+        "Failed to execute resume stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_wait_for_finish()
+{
+    // Nothing asynchronous to wait for in this element.
+    return HAILO_SUCCESS;
+}
+
+std::string HwWriteElement::description() const
+{
+    // Human-readable one-liner used when printing the pipeline layout.
+    std::stringstream desc;
+    desc << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";
+    return desc.str();
+}
+
+Expected<std::shared_ptr<CopyBufferElement>> CopyBufferElement::create(const std::string &name,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+{
+    // Copy elements never gather statistics, so no stats flags are requested.
+    auto collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
+    CHECK_EXPECTED(collector);
+
+    auto copy_elem = make_shared_nothrow<CopyBufferElement>(name, collector.release(), std::move(pipeline_status));
+    CHECK_AS_EXPECTED(nullptr != copy_elem, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", copy_elem->name());
+
+    return copy_elem;
+}
+
+// Filter element whose only action is a byte copy into a caller-provided buffer.
+CopyBufferElement::CopyBufferElement(const std::string &name, DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status))
+{}
+
+PipelinePad &CopyBufferElement::next_pad()
+{
+    // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
+    // so the pad to schedule next is the one linked to our single sink.
+    return *m_sinks[0].prev();
+}
+
+Expected<PipelineBuffer> CopyBufferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    // Copies the input payload into the caller-provided (optional) buffer and
+    // returns that buffer. Both a present optional buffer and a matching size
+    // are hard requirements.
+    CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be passed to CopyBufferElement!");
+
+    CHECK_AS_EXPECTED(optional.size() == input.size(), HAILO_INVALID_ARGUMENT, "Optional buffer size does not equal to the input buffer size!");
+    memcpy(optional.data(), input.data(), optional.size());
+
+    return std::move(optional);
+}
+
+Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> VStreamsBuilder::create_vstreams(
+    ConfiguredNetworkGroup &net_group, bool quantized, hailo_format_type_t format_type,
+    const std::string &network_name)
+{
+    // Translate the (quantized, format_type) pair into full vstream params and delegate.
+    return create_vstreams(net_group, HailoRTDefaults::get_vstreams_params(quantized, format_type), network_name);
+}
+
+Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> VStreamsBuilder::create_vstreams(
+    ConfiguredNetworkGroup &net_group, const hailo_vstream_params_t &vstreams_params,
+    const std::string &network_name)
+{
+    // Creates all input and output vstreams of the network, applying the same
+    // user-supplied params to every stream. The per-stream default params are
+    // queried only to discover the stream names.
+    std::map<std::string, hailo_vstream_params_t> vstreams_params_by_input_stream_name;
+    auto input_vstream_params = net_group.make_input_vstream_params(true, HAILO_FORMAT_TYPE_AUTO,
+        HAILO_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_name);
+    CHECK_EXPECTED(input_vstream_params);
+
+    // Iterate by const reference - iterating the released map by value copied
+    // every (name, params) pair.
+    for (const auto &params_pair : input_vstream_params.value()) {
+        vstreams_params_by_input_stream_name.emplace(params_pair.first, vstreams_params);
+    }
+
+    auto expected_all_inputs = create_input_vstreams(net_group, vstreams_params_by_input_stream_name);
+    CHECK_EXPECTED(expected_all_inputs);
+
+    std::map<std::string, hailo_vstream_params_t> vstreams_params_by_output_stream_name;
+    auto output_vstream_params = net_group.make_output_vstream_params(true, HAILO_FORMAT_TYPE_AUTO,
+        HAILO_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_name);
+    CHECK_EXPECTED(output_vstream_params);
+
+    for (const auto &params_pair : output_vstream_params.value()) {
+        vstreams_params_by_output_stream_name.emplace(params_pair.first, vstreams_params);
+    }
+
+    auto expected_all_outputs = create_output_vstreams(net_group, vstreams_params_by_output_stream_name);
+    CHECK_EXPECTED(expected_all_outputs);
+
+    return std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>(
+        expected_all_inputs.release(), expected_all_outputs.release());
+}
+
+static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_info_t &stream_info,
+    const hailo_vstream_params_t &vstream_params)
+{
+    // Replace any AUTO fields in the user buffer format with concrete values
+    // derived from the underlying stream's format; all other params pass through.
+    hailo_vstream_params_t expanded = vstream_params;
+    expanded.user_buffer_format = HailoRTDefaults::expand_auto_format(vstream_params.user_buffer_format,
+        stream_info.format);
+    return expanded;
+}
+
+Expected<std::vector<InputVStream>> VStreamsBuilder::create_input_vstreams(ConfiguredNetworkGroup &net_group,
+    const std::map<std::string, hailo_vstream_params_t> &inputs_params)
+{
+    // Thin pass-through: the configured network group owns vstream construction.
+    return net_group.create_input_vstreams(inputs_params);
+}
+
+Expected<std::vector<OutputVStream>> VStreamsBuilder::create_output_vstreams(ConfiguredNetworkGroup &net_group,
+    const std::map<std::string, hailo_vstream_params_t> &outputs_params)
+{
+    // Thin pass-through: the configured network group owns vstream construction.
+    return net_group.create_output_vstreams(outputs_params);
+}
+
+// Builds the write-side pipeline for a single input stream:
+//   [PreInferElement -> PushQueueElement ->] HwWriteElement
+// The transform stages are inserted only when the user format differs from the HW format.
+Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &vstream_info,
+    const hailo_vstream_params_t &vstream_params)
+{
+    // TODO (HRT-4522): Support this measurement
+    CHECK_AS_EXPECTED(!(vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_FPS), HAILO_NOT_IMPLEMENTED,
+        "Pipeline FPS statistics measurement is not implemented");
+
+    std::vector<std::shared_ptr<PipelineElement>> elements;
+    std::vector<InputVStream> vstreams;
+
+    // When the stream is scheduler-managed there is no single activation event to wait on.
+    EventPtr core_op_activated_event = nullptr;
+    if (!input_stream->is_scheduled()) {
+        core_op_activated_event = input_stream->get_core_op_activated_event();
+    }
+
+    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+
+    // Shared by all elements of this pipeline; the first failing element records its status here.
+    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
+    CHECK_EXPECTED(pipeline_latency_accumulator);
+
+    auto user_timeout = std::chrono::milliseconds(vstream_params.timeout_ms);
+
+    // The HW write element is always last; earlier stages are inserted at the front.
+    auto hw_write_elem = HwWriteElement::create(input_stream,
+        PipelineObject::create_element_name("HwWriteElement", input_stream->name(), input_stream->get_info().index),
+        vstream_params.pipeline_elements_stats_flags, pipeline_status);
+    CHECK_EXPECTED(hw_write_elem);
+    elements.insert(elements.begin(), hw_write_elem.value());
+
+    auto should_transform = InputTransformContext::is_transformation_required(input_stream->get_info().shape,
+        vstream_params.user_buffer_format, input_stream->get_info().hw_shape, input_stream->get_info().format,
+        input_stream->get_info().quant_info);
+
+    if (should_transform) {
+        // NOTE(review): elem_after_post_infer appears unused below - confirm it can be removed.
+        std::shared_ptr<SinkElement> elem_after_post_infer = hw_write_elem.value();
+        auto queue_elem = PushQueueElement::create(
+            PipelineObject::create_element_name("PushQueueElement", input_stream->get_info().name, input_stream->get_info().index),
+            vstream_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED(queue_elem);
+        elements.insert(elements.begin(), queue_elem.value());
+        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(queue_elem.value(), hw_write_elem.value()));
+
+        // Pre-infer converts the user-format buffer to the HW format before queueing.
+        auto pre_infer_elem = PreInferElement::create(input_stream->get_info().shape, vstream_params.user_buffer_format,
+            input_stream->get_info().hw_shape, input_stream->get_info().format, input_stream->get_info().quant_info,
+            PipelineObject::create_element_name("PreInferElement", input_stream->get_info().name, input_stream->get_info().index),
+            vstream_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED(pre_infer_elem);
+        elements.insert(elements.begin(), pre_infer_elem.value());
+        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_infer_elem.value(), queue_elem.value()));
+
+        input_stream->set_timeout(user_timeout);
+        auto vstream = InputVStream::create(vstream_info, vstream_params, pre_infer_elem.release(), hw_write_elem.release(), std::move(elements),
+            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+        CHECK_EXPECTED(vstream);
+        vstreams.emplace_back(vstream.release());
+    } else {
+        // No transformation needed - the vstream writes straight into the HW element.
+        input_stream->set_timeout(user_timeout);
+        auto vstream = InputVStream::create(vstream_info, vstream_params, hw_write_elem.value(), hw_write_elem.value(), std::move(elements),
+            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+        CHECK_EXPECTED(vstream);
+        vstreams.emplace_back(vstream.release());
+    }
+
+    for (const auto &vstream : vstreams) {
+        LOGGER__INFO("{}", vstream.get_pipeline_description());
+    }
+
+    return vstreams;
+}
+
+// Builds the read-side pipeline(s) for a single output stream:
+//   HwReadElement [-> PullQueue -> PostInferElement -> UserBufferQueue]
+// For mux streams the pipeline is fanned out via add_demux(); otherwise a single
+// vstream is created, with transform stages only when the formats differ.
+Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::shared_ptr<OutputStream> output_stream,
+    NameToVStreamParamsMap &vstreams_params_map, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
+{
+    std::vector<std::shared_ptr<PipelineElement>> elements;
+    std::vector<OutputVStream> vstreams;
+
+    // When the stream is scheduler-managed there is no single activation event to wait on.
+    EventPtr core_op_activated_event = nullptr;
+    if (!output_stream->is_scheduled()) {
+        core_op_activated_event = output_stream->get_core_op_activated_event();
+    }
+
+    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
+
+    assert(!vstreams_params_map.empty());
+
+    // Note: In case of multiple values in vstreams_params_map (e.g. in the case of demux), we'll set the
+    // pipeline_elements_stats_flags for the hw_read_element as bitwise or of all the flags.
+    hailo_pipeline_elem_stats_flags_t hw_read_element_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
+    hailo_vstream_stats_flags_t hw_read_stream_stats_flags = HAILO_VSTREAM_STATS_NONE;
+    size_t buffer_pool_size = 0;
+    for (const auto &elem_name_params : vstreams_params_map) {
+        hw_read_element_stats_flags |= elem_name_params.second.pipeline_elements_stats_flags;
+        hw_read_stream_stats_flags |= elem_name_params.second.vstream_stats_flags;
+        buffer_pool_size += elem_name_params.second.queue_size;
+    }
+
+    // TODO (HRT-4522): Support this measurement
+    CHECK_AS_EXPECTED(!(hw_read_stream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_FPS), HAILO_NOT_IMPLEMENTED,
+        "Pipeline FPS statistics measurement is not implemented");
+
+    auto hw_read_elem = HwReadElement::create(output_stream,
+        PipelineObject::create_element_name("HwReadElement", output_stream->name(), output_stream->get_info().index),
+        HAILO_INFINITE_TIMEOUT, buffer_pool_size, hw_read_element_stats_flags, hw_read_stream_stats_flags, shutdown_event, pipeline_status);
+    CHECK_EXPECTED(hw_read_elem);
+    elements.push_back(hw_read_elem.value());
+
+    if (output_stream->get_info().is_mux) {
+        // Mux stream: fan out into one vstream per demuxed edge.
+        hailo_status status = add_demux(output_stream, vstreams_params_map, std::move(elements), vstreams, hw_read_elem.value(),
+            shutdown_event, pipeline_status, output_vstream_infos);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    } else {
+        auto vstream_info = output_vstream_infos.find(output_stream->name());
+        CHECK_AS_EXPECTED(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
+            "Failed to find vstream info of {}", output_stream->name());
+
+        assert(1 == vstreams_params_map.size());
+        auto vstream_params = expand_vstream_params_autos(output_stream->get_info(), vstreams_params_map.begin()->second);
+
+        auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
+        CHECK_EXPECTED(pipeline_latency_accumulator);
+
+        auto should_transform = OutputTransformContext::is_transformation_required(output_stream->get_info().hw_shape,
+            output_stream->get_info().format, output_stream->get_info().shape,
+            vstream_params.user_buffer_format, output_stream->get_info().quant_info);
+
+        if (should_transform) {
+            auto hw_read_queue_elem = PullQueueElement::create(
+                PipelineObject::create_element_name("PullQueueElement_hw_read", output_stream->name(), output_stream->get_info().index),
+                vstream_params, shutdown_event, pipeline_status);
+            CHECK_EXPECTED(hw_read_queue_elem);
+            elements.push_back(hw_read_queue_elem.value());
+            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_elem.value(), hw_read_queue_elem.value()));
+
+            // Post-infer converts the HW-format buffer into the user-requested format.
+            auto post_infer_elem = PostInferElement::create(output_stream->get_info().hw_shape, output_stream->get_info().format,
+                output_stream->get_info().shape, vstream_params.user_buffer_format, output_stream->get_info().quant_info, output_stream->get_info().nms_info,
+                PipelineObject::create_element_name("PostInferElement", output_stream->name(), output_stream->get_info().index),
+                vstream_params, pipeline_status);
+            CHECK_EXPECTED(post_infer_elem);
+            elements.push_back(post_infer_elem.value());
+            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_queue_elem.value(), post_infer_elem.value()));
+
+            auto post_infer_queue_elem = UserBufferQueueElement::create(
+                PipelineObject::create_element_name("UserBufferQueueElement_post_infer", output_stream->name(), output_stream->get_info().index),
+                vstream_params, shutdown_event, pipeline_status);
+            CHECK_EXPECTED(post_infer_queue_elem);
+            elements.push_back(post_infer_queue_elem.value());
+            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
+
+            // The user-facing timeout is enforced at the vstream level; internal stages wait forever.
+            output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
+            hw_read_queue_elem->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
+            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, post_infer_queue_elem.release(), std::move(elements),
+                std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED(vstream);
+            vstreams.emplace_back(vstream.release());
+        } else {
+            // No transformation: the vstream reads directly from the HW element.
+            output_stream->set_timeout(std::chrono::milliseconds(vstream_params.timeout_ms));
+            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, hw_read_elem.release(), std::move(elements),
+                std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED(vstream);
+            vstreams.emplace_back(vstream.release());
+        }
+    }
+
+    for (const auto &vstream : vstreams) {
+        LOGGER__INFO("{}", vstream.get_pipeline_description());
+    }
+
+    return vstreams;
+}
+
+InputVStream VStreamsBuilderUtils::create_input(std::shared_ptr<InputVStreamInternal> input_vstream)
+{
+    // Wraps the internal implementation in the public handle type.
+    return InputVStream(std::move(input_vstream));
+}
+
+OutputVStream VStreamsBuilderUtils::create_output(std::shared_ptr<OutputVStreamInternal> output_vstream)
+{
+    // Wraps the internal implementation in the public handle type.
+    return OutputVStream(std::move(output_vstream));
+}
+
+// Two formats match only when order, flags and type all agree.
+static bool are_formats_equal(const hailo_format_t &format1, const hailo_format_t &format2) {
+    if (format1.order != format2.order) {
+        return false;
+    }
+    if (format1.flags != format2.flags) {
+        return false;
+    }
+    return format1.type == format2.type;
+}
+
+Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_nms(OutputStreamPtrVector &output_streams,
+    hailo_vstream_params_t vstreams_params,
+    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
+{
+    // Fusing defused NMS outputs is only possible when every stream shares one format.
+    for (size_t i = 0; i < output_streams.size(); i++) {
+        CHECK_AS_EXPECTED(are_formats_equal(output_streams[0]->get_info().format, output_streams[i]->get_info().format),
+            HAILO_INVALID_ARGUMENT, "All nms streams of the same virtual output must have the same format");
+    }
+
+    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
+
+    std::vector<std::shared_ptr<PipelineElement>> elements;
+    std::vector<OutputVStream> vstreams;
+
+    const auto fuse_status = add_nms_fuse(output_streams, vstreams_params, elements, vstreams, shutdown_event,
+        pipeline_status, output_vstream_infos);
+    CHECK_SUCCESS_AS_EXPECTED(fuse_status);
+
+    for (const auto &vstream : vstreams) {
+        LOGGER__INFO("{}", vstream.get_pipeline_description());
+    }
+
+    return vstreams;
+}
+
+Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_nms(OutputStreamPtrVector &output_streams,
+    hailo_vstream_params_t vstreams_params,
+    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
+    const NetFlowElement &nms_op)
+{
+    // Shared pipeline primitives: one shutdown event and one status cell for all elements.
+    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
+
+    std::vector<std::shared_ptr<PipelineElement>> elements;
+    std::vector<OutputVStream> vstreams;
+
+    const auto post_process_status = add_nms_post_process(output_streams, vstreams_params, elements, vstreams, shutdown_event,
+        pipeline_status, output_vstream_infos, nms_op);
+    CHECK_SUCCESS_AS_EXPECTED(post_process_status);
+
+    for (const auto &vstream : vstreams) {
+        LOGGER__INFO("{}", vstream.get_pipeline_description());
+    }
+
+    return vstreams;
+}
+
+// Fans a mux output stream out into one vstream per demuxed edge:
+//   hw_read_elem -> TransformDemuxElement -> (per edge) PullQueue [-> PostInfer -> UserBufferQueue | -> CopyBuffer]
+// base_elements are shared by all created vstreams; per-edge elements are appended to a copy.
+hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
+    std::vector<std::shared_ptr<PipelineElement>> &&base_elements, std::vector<OutputVStream> &vstreams,
+    std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
+{
+    auto expected_demuxer = OutputDemuxer::create(*output_stream);
+    CHECK_EXPECTED_AS_STATUS(expected_demuxer);
+
+    std::shared_ptr<OutputDemuxer> demuxer_ptr = expected_demuxer.release();
+    CHECK(nullptr != demuxer_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    // Internal stages wait forever; per-vstream timeouts are enforced downstream.
+    auto status = output_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
+    CHECK_SUCCESS(status);
+
+    // Note: In case of multiple values in vstreams_params_map (e.g. in the case of demux), we'll set the
+    // pipeline_elements_stats_flags for the demux_elem as bitwise or of all the flags.
+    hailo_pipeline_elem_stats_flags_t demux_elem_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
+    hailo_vstream_stats_flags_t demux_vstream_stats_flags = HAILO_VSTREAM_STATS_NONE;
+    size_t buffer_pool_size = 0;
+    for (const auto &elem_name_params : vstreams_params_map) {
+        demux_elem_stats_flags |= elem_name_params.second.pipeline_elements_stats_flags;
+        demux_vstream_stats_flags |= elem_name_params.second.vstream_stats_flags;
+        buffer_pool_size += elem_name_params.second.queue_size;
+    }
+
+    auto demux_elem = TransformDemuxElement::create(demuxer_ptr,
+        PipelineObject::create_element_name("TransformDemuxElement", output_stream->name(), output_stream->get_info().index),
+        std::chrono::milliseconds(HAILO_INFINITE), buffer_pool_size, demux_elem_stats_flags, demux_vstream_stats_flags, shutdown_event, pipeline_status);
+    CHECK_EXPECTED_AS_STATUS(demux_elem);
+    base_elements.push_back(demux_elem.value());
+    CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem, demux_elem.value()));
+
+    // With the scheduler there is no single activation event to wait on.
+    EventPtr core_op_activated_event = nullptr;
+    if (!output_stream->is_scheduled()) {
+        core_op_activated_event = output_stream->get_core_op_activated_event();
+    }
+
+    // i indexes the demux element's source pads, one per edge.
+    uint32_t i = 0;
+    for (auto &edge_info : demuxer_ptr->get_edges_stream_info()) {
+        auto name_params_pair = vstreams_params_map.find(edge_info.name);
+        CHECK(name_params_pair != vstreams_params_map.end(), HAILO_NOT_FOUND,
+            "Failed to find vstreams params of edge {}", edge_info.name);
+
+        const auto vstream_info = output_vstream_infos.find(edge_info.name);
+        CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
+            "Failed to find vstream info of {}", edge_info.name);
+
+        const auto vstream_params = expand_vstream_params_autos(output_stream->get_info(), name_params_pair->second);
+
+        // For each mux vstream, we create a copy of the previous elements
+        auto current_vstream_elements = base_elements;
+
+        // For muxed VStreams we use the same pipeline_status for all
+        auto pipeline_status_copy = pipeline_status;
+        auto demux_queue_elem = PullQueueElement::create(
+            PipelineObject::create_element_name("PullQueueElement_demux", edge_info.name, edge_info.index),
+            vstream_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(demux_queue_elem);
+        current_vstream_elements.push_back(demux_queue_elem.value());
+        CHECK_SUCCESS(PipelinePad::link_pads(demux_elem.value(), demux_queue_elem.value(), i, 0));
+
+        demux_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
+
+        auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
+        CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
+
+        auto should_transform = OutputTransformContext::is_transformation_required(edge_info.hw_shape,
+            edge_info.format, edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info);
+
+        if (should_transform) {
+            // Post-infer converts the edge's HW format into the user-requested format.
+            auto post_infer_elem = PostInferElement::create(edge_info.hw_shape, edge_info.format,
+                edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info, edge_info.nms_info,
+                PipelineObject::create_element_name("PostInferElement", edge_info.name, edge_info.index),
+                vstream_params, pipeline_status);
+            CHECK_EXPECTED_AS_STATUS(post_infer_elem);
+            current_vstream_elements.push_back(post_infer_elem.value());
+            CHECK_SUCCESS(PipelinePad::link_pads(demux_queue_elem.value(), post_infer_elem.value()));
+
+            auto post_infer_queue_elem = UserBufferQueueElement::create(
+                PipelineObject::create_element_name("UserBufferQueueElement_post_infer", edge_info.name, edge_info.index),
+                vstream_params, shutdown_event, pipeline_status);
+            CHECK_EXPECTED_AS_STATUS(post_infer_queue_elem);
+            current_vstream_elements.push_back(post_infer_queue_elem.value());
+            CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
+
+            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, post_infer_queue_elem.release(), std::move(current_vstream_elements),
+                std::move(pipeline_status_copy), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED_AS_STATUS(vstream);
+            vstreams.emplace_back(vstream.release());
+        } else {
+            // TODO: HRT-4179
+            auto user_copy_elem = CopyBufferElement::create(
+                PipelineObject::create_element_name("CopyBufferElement", edge_info.name, edge_info.index),
+                pipeline_status);
+            CHECK_EXPECTED_AS_STATUS(user_copy_elem);
+            current_vstream_elements.push_back(user_copy_elem.value());
+            CHECK_SUCCESS(PipelinePad::link_pads(demux_queue_elem.value(), user_copy_elem.value()));
+
+            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, user_copy_elem.release(), std::move(current_vstream_elements),
+                std::move(pipeline_status_copy), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED_AS_STATUS(vstream);
+            vstreams.emplace_back(vstream.release());
+        }
+        i++;
+    }
+    return HAILO_SUCCESS;
+}
+
+// Builds a single fused vstream from several defused NMS output streams:
+//   (per stream) HwReadElement -> PullQueue --\
+//                                              NmsMuxElement [-> PullQueue -> PostInfer -> UserBufferQueue]
+// All per-stream pipelines feed distinct sink pads of the mux element.
+hailo_status VStreamsBuilderUtils::add_nms_fuse(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
+    std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
+    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
+{
+    std::vector<hailo_nms_info_t> nms_infos;
+    nms_infos.reserve(output_streams.size());
+    for (const auto &out_stream : output_streams) {
+        CHECK(out_stream->get_info().nms_info.defuse_info.class_group_index <= output_streams.size(),
+            HAILO_INVALID_ARGUMENT, "Not all defused nms outputs were grouped correctly!");
+        nms_infos.emplace_back(out_stream->get_info().nms_info);
+    }
+
+    // To get the fused layer name and src stream format, we use the stream info of one of the defuses
+    auto first_defused_stream_info = output_streams[0]->get_info();
+    auto fused_layer_name = first_defused_stream_info.nms_info.defuse_info.original_name;
+    auto src_stream_format = first_defused_stream_info.format;
+
+    auto vstream_info = output_vstream_infos.find(fused_layer_name);
+    CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
+        "Failed to find vstream info of {}", fused_layer_name);
+
+    vstreams_params = expand_vstream_params_autos(first_defused_stream_info, vstreams_params);
+    auto nms_elem = NmsMuxElement::create(nms_infos,
+        PipelineObject::create_element_name("NmsMuxElement", fused_layer_name, 0),
+        vstreams_params, shutdown_event, pipeline_status);
+    CHECK_EXPECTED_AS_STATUS(nms_elem);
+    auto fused_layer_nms_info = nms_elem.value()->get_fused_nms_info();
+
+    // One read pipeline per defused stream; sink pad i of the mux gets stream i.
+    for (uint32_t i = 0; i < output_streams.size(); ++i) {
+        const auto &curr_stream_info = output_streams[i]->get_info();
+        // NOTE(review): set_timeout's return value is ignored here but checked in add_demux - confirm intentional.
+        output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT);
+
+        auto hw_read_elem = HwReadElement::create(output_streams[i],
+            PipelineObject::create_element_name("HwReadElement", curr_stream_info.name, curr_stream_info.index),
+            HAILO_INFINITE_TIMEOUT, vstreams_params.queue_size, vstreams_params.pipeline_elements_stats_flags,
+            vstreams_params.vstream_stats_flags, shutdown_event, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(hw_read_elem);
+        elements.push_back(hw_read_elem.value());
+
+        auto nms_source_queue_elem = PullQueueElement::create(
+            PipelineObject::create_element_name("PullQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
+            vstreams_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(nms_source_queue_elem);
+        elements.push_back(nms_source_queue_elem.value());
+        nms_source_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
+        CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem.value(), nms_source_queue_elem.value()));
+        CHECK_SUCCESS(PipelinePad::link_pads(nms_source_queue_elem.value(), nms_elem.value(), 0, i));
+    }
+    elements.push_back(nms_elem.value());
+
+    auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstreams_params);
+    CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
+
+    auto should_transform = OutputTransformContext::is_transformation_required({}, src_stream_format, {},
+        vstreams_params.user_buffer_format, vstream_info->second.quant_info);
+
+    // With the scheduler there is no single activation event to wait on.
+    EventPtr core_op_activated_event = nullptr;
+    if (!output_streams[0]->is_scheduled()) {
+        core_op_activated_event = output_streams[0]->get_core_op_activated_event();
+    }
+
+    if (should_transform) {
+        auto nms_queue_elem = PullQueueElement::create(
+            PipelineObject::create_element_name("PullQueueElement_nms", fused_layer_name, 0),
+            vstreams_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(nms_queue_elem);
+        nms_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
+        elements.push_back(nms_queue_elem.value());
+        CHECK_SUCCESS(PipelinePad::link_pads(nms_elem.value(), nms_queue_elem.value()));
+
+        // Post-infer converts the fused NMS buffer into the user-requested format.
+        auto post_infer_elem = PostInferElement::create({}, src_stream_format,
+            {}, vstreams_params.user_buffer_format, vstream_info->second.quant_info, fused_layer_nms_info,
+            PipelineObject::create_element_name("PostInferElement", fused_layer_name, 0), vstreams_params, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(post_infer_elem);
+
+        elements.push_back(post_infer_elem.value());
+        CHECK_SUCCESS(PipelinePad::link_pads(nms_queue_elem.value(), post_infer_elem.value()));
+
+        auto post_infer_queue_elem = UserBufferQueueElement::create(
+            PipelineObject::create_element_name("UserBufferQueueElement_post_infer", fused_layer_name, 0),
+            vstreams_params, shutdown_event, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(post_infer_queue_elem);
+        elements.push_back(post_infer_queue_elem.value());
+        CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
+
+        auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, post_infer_queue_elem.release(), std::move(elements),
+            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+        CHECK_EXPECTED_AS_STATUS(vstream);
+        vstreams.emplace_back(vstream.release());
+    } else {
+        // No transformation: the vstream reads directly from the mux element.
+        auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
+            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+        CHECK_EXPECTED_AS_STATUS(vstream);
+        vstreams.emplace_back(vstream.release());
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Builds an output pipeline that feeds several raw output streams into a software NMS
+// post-process op (nms_op) and exposes the fused result as a single output vstream.
+// Per stream: HwReadElement -> PullQueueElement -> (shared) NmsPostProcessMuxElement.
+// Created elements are appended to `elements`; the resulting vstream to `vstreams`.
+hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
+ std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
+ EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
+ const NetFlowElement &nms_op)
+{
+ auto first_stream_info = output_streams[0]->get_info();
+ // Resolve AUTO format fields to the only values the NMS post-process supports,
+ // then validate that the (possibly user-supplied) format matches them.
+ if (vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_AUTO) {
+ vstreams_params.user_buffer_format.type = HAILO_FORMAT_TYPE_FLOAT32;
+ }
+ if (vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_AUTO) {
+ vstreams_params.user_buffer_format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
+ }
+ vstreams_params = expand_vstream_params_autos(first_stream_info, vstreams_params);
+ CHECK(vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_FLOAT32, HAILO_INVALID_ARGUMENT,
+ "NMS output format type must be HAILO_FORMAT_TYPE_FLOAT32");
+ CHECK(vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_HAILO_NMS, HAILO_INVALID_ARGUMENT,
+ "NMS output format order must be HAILO_FORMAT_ORDER_HAILO_NMS");
+
+ // Describe each physical stream's buffers as op input metadata (keyed by stream name).
+ std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
+ std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+ for (uint32_t i = 0; i < output_streams.size(); ++i) {
+ const auto &curr_stream_info = output_streams[i]->get_info();
+ net_flow::BufferMetaData input_metadata = {
+ curr_stream_info.shape,
+ curr_stream_info.hw_shape,
+ curr_stream_info.format,
+ curr_stream_info.quant_info
+ };
+ inputs_metadata.insert({curr_stream_info.name, input_metadata});
+ }
+
+ // The op is expected to have exactly one output pad; its name is used to look up
+ // the vstream info that describes the fused output.
+ const auto &output_pads = nms_op.op->outputs_metadata();
+ assert(output_pads.size() == 1);
+ auto vstream_info = output_vstream_infos.find(output_pads.begin()->first);
+ CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
+ "Failed to find vstream info of {}", nms_op.name);
+ net_flow::BufferMetaData output_metadata = {
+ vstream_info->second.shape,
+ vstream_info->second.shape,
+ vstream_info->second.format,
+ vstream_info->second.quant_info
+ };
+ outputs_metadata.insert({vstream_info->first, output_metadata});
+
+ // The mux element runs the NMS op over all its sinks and produces the fused output.
+ auto nms_elem = NmsPostProcessMuxElement::create(nms_op.op, nms_op.nms_info,
+ PipelineObject::create_element_name("NmsPostProcessMuxElement", nms_op.name, 0),
+ vstreams_params, shutdown_event, pipeline_status);
+ CHECK_EXPECTED_AS_STATUS(nms_elem);
+
+ // Source format the op consumes: quantized NHCW with the stream's element type.
+ // Any stream whose HW output differs would need a transformation, which is rejected below.
+ hailo_format_t nms_src_format;
+ nms_src_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+ nms_src_format.order = HAILO_FORMAT_ORDER_NHCW;
+ nms_src_format.type = first_stream_info.format.type;
+
+ for (uint32_t i = 0; i < output_streams.size(); ++i) {
+ const auto &curr_stream_info = output_streams[i]->get_info();
+ output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT);
+
+ auto should_transform = OutputTransformContext::is_transformation_required(curr_stream_info.hw_shape, curr_stream_info.format,
+ curr_stream_info.hw_shape, nms_src_format, vstream_info->second.quant_info);
+
+ CHECK(!should_transform, HAILO_INVALID_ARGUMENT, "Unexpected transformation required for {}", curr_stream_info.name);
+
+ // Read raw frames from the HW stream...
+ auto hw_read_elem = HwReadElement::create(output_streams[i],
+ PipelineObject::create_element_name("HwReadElement", curr_stream_info.name, curr_stream_info.index),
+ HAILO_INFINITE_TIMEOUT, vstreams_params.queue_size, vstreams_params.pipeline_elements_stats_flags,
+ vstreams_params.vstream_stats_flags, shutdown_event, pipeline_status);
+ CHECK_EXPECTED_AS_STATUS(hw_read_elem);
+ elements.push_back(hw_read_elem.value());
+
+ // ...buffer them in a queue, and attach that queue as sink `i` of the mux element.
+ auto nms_source_queue_elem = PullQueueElement::create(
+ PipelineObject::create_element_name("PullQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
+ vstreams_params, shutdown_event, pipeline_status);
+ CHECK_EXPECTED_AS_STATUS(nms_source_queue_elem);
+ nms_source_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
+ elements.push_back(nms_source_queue_elem.value());
+ CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem.value(), nms_source_queue_elem.value()));
+ CHECK_SUCCESS(PipelinePad::link_pads(nms_source_queue_elem.value(), nms_elem.value(), 0, i));
+ nms_elem.value()->add_sink_name(curr_stream_info.name);
+ }
+ elements.push_back(nms_elem.value());
+
+ auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstreams_params);
+ CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
+
+ // With the scheduler there is no single activation event to wait on; leave it null.
+ EventPtr core_op_activated_event = nullptr;
+ if (!output_streams[0]->is_scheduled()) {
+ core_op_activated_event = output_streams[0]->get_core_op_activated_event();
+ }
+
+ // The mux element serves as the vstream's pipeline entry.
+ auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
+ std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+ CHECK_EXPECTED_AS_STATUS(vstream);
+ vstreams.emplace_back(vstream.release());
+
+ return HAILO_SUCCESS;
+}
+
+// Creates a latency accumulator iff HAILO_VSTREAM_STATS_MEASURE_LATENCY is set in the
+// vstream params. Returns a null AccumulatorPtr (wrapped in Expected) when latency
+// measurement is disabled, and HAILO_OUT_OF_HOST_MEMORY if allocation fails.
+Expected<AccumulatorPtr> VStreamsBuilderUtils::create_pipeline_latency_accumulator(const hailo_vstream_params_t &vstreams_params)
+{
+ AccumulatorPtr pipeline_latency_accumulator = nullptr;
+ const auto measure_latency = ((vstreams_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0);
+ if (measure_latency) {
+ pipeline_latency_accumulator = make_shared_nothrow<FullAccumulator<double>>("latency");
+ CHECK_AS_EXPECTED(nullptr != pipeline_latency_accumulator, HAILO_OUT_OF_HOST_MEMORY);
+ }
+
+ return pipeline_latency_accumulator;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vstream_internal.hpp
+ * @brief Virtual Stream.
+ * The class hierarchy is as follows:
+ * ------------------------------------------------------------------------------------------------
+ * | BaseVStream | (Internal "interface")
+ * | ___________________________|___________________________ |
+ * | / \ |
+ * | InputVStreamInternal OutputVStreamInternal | (Base classes)
+ * | / \ / \ |
+ * | InputVStreamImpl InputVStreamClient OutputVStreamImpl OutputVStreamClient | (Actual implementations)
+ * ------------------------------------------------------------------------------------------------
+ * -- InputVStream (External 'interface')
+ * |
+ * |__ std::shared_ptr<InputVStreamInternal>
+ *
+ * -- OutputVStream (External 'interface')
+ * |
+ * |__ std::shared_ptr<OutputVStreamInternal>
+ **/
+
+#ifndef _HAILO_VSTREAM_INTERNAL_HPP_
+#define _HAILO_VSTREAM_INTERNAL_HPP_
+
+#include "hailo/transform.hpp"
+#include "hailo/stream.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "net_flow/pipeline/pipeline.hpp"
+#include "net_flow/ops/yolo_post_process.hpp"
+#include "network_group/network_group_internal.hpp"
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "service/hailort_rpc_client.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+
+namespace hailort
+{
+
+/*! Virtual stream base class */
+class BaseVStream
+{
+public:
+ BaseVStream(BaseVStream &&other) noexcept;
+ BaseVStream& operator=(BaseVStream &&other) noexcept;
+ virtual ~BaseVStream() = default;
+
+ virtual size_t get_frame_size() const;
+ virtual const hailo_vstream_info_t &get_info() const;
+ virtual const hailo_format_t &get_user_buffer_format() const;
+ virtual std::string name() const;
+ virtual std::string network_name() const;
+ virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const;
+ virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const;
+ virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const;
+ virtual AccumulatorPtr get_pipeline_latency_accumulator() const;
+ virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
+
+ virtual hailo_status abort();
+ virtual hailo_status resume();
+ virtual hailo_status start_vstream();
+ virtual hailo_status stop_vstream();
+ virtual hailo_status stop_and_clear();
+
+ virtual hailo_status before_fork() { return HAILO_SUCCESS; };
+ virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; };
+ virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; };
+
+protected:
+ BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+ EventPtr &&core_op_activated_event, hailo_status &output_status);
+ BaseVStream() = default;
+
+ virtual std::string get_pipeline_description() const = 0;
+
+ hailo_vstream_info_t m_vstream_info;
+ hailo_vstream_params_t m_vstream_params;
+ bool m_measure_pipeline_latency;
+ std::shared_ptr<PipelineElement> m_entry_element;
+ std::vector<std::shared_ptr<PipelineElement>> m_pipeline;
+ volatile bool m_is_activated;
+ volatile bool m_is_aborted;
+ std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;
+ EventPtr m_shutdown_event;
+ EventPtr m_core_op_activated_event;
+ std::map<std::string, AccumulatorPtr> m_fps_accumulators;
+ std::map<std::string, AccumulatorPtr> m_latency_accumulators;
+ std::map<std::string, std::vector<AccumulatorPtr>> m_queue_size_accumulators;
+ AccumulatorPtr m_pipeline_latency_accumulator;
+};
+
+/*! Input virtual stream, used to stream data to device */
+class InputVStreamInternal : public BaseVStream
+{
+public:
+ static Expected<std::shared_ptr<InputVStreamInternal>> create(const hailo_vstream_info_t &vstream_info,
+ const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+ std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
+ AccumulatorPtr pipeline_latency_accumulator);
+ InputVStreamInternal(InputVStreamInternal &&other) noexcept = default;
+ InputVStreamInternal &operator=(InputVStreamInternal &&other) noexcept = default;
+ virtual ~InputVStreamInternal() = default;
+
+ virtual hailo_status write(const MemoryView &buffer) = 0;
+ virtual hailo_status flush() = 0;
+
+ virtual std::string get_pipeline_description() const override;
+
+protected:
+ InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+ EventPtr &&core_op_activated_event, hailo_status &output_status);
+ InputVStreamInternal() = default;
+};
+
+/*! Output virtual stream, used to read data from device */
+class OutputVStreamInternal : public BaseVStream
+{
+public:
+ static Expected<std::shared_ptr<OutputVStreamInternal>> create(
+ const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+ EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
+ OutputVStreamInternal(OutputVStreamInternal &&other) noexcept = default;
+ OutputVStreamInternal &operator=(OutputVStreamInternal &&other) noexcept = default;
+ virtual ~OutputVStreamInternal() = default;
+
+
+ virtual hailo_status read(MemoryView buffer) = 0;
+ virtual std::string get_pipeline_description() const override;
+
+protected:
+ OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+ EventPtr core_op_activated_event, hailo_status &output_status);
+ OutputVStreamInternal() = default;
+};
+
+/*! In-process input vstream implementation; pushes user buffers through the local pipeline. */
+class InputVStreamImpl : public InputVStreamInternal
+{
+public:
+ static Expected<std::shared_ptr<InputVStreamImpl>> create(const hailo_vstream_info_t &vstream_info,
+ const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+ std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
+ AccumulatorPtr pipeline_latency_accumulator);
+ InputVStreamImpl(InputVStreamImpl &&) noexcept = default;
+ InputVStreamImpl(const InputVStreamImpl &) = delete;
+ InputVStreamImpl &operator=(InputVStreamImpl &&) noexcept = default;
+ InputVStreamImpl &operator=(const InputVStreamImpl &) = delete;
+ virtual ~InputVStreamImpl();
+
+ virtual hailo_status write(const MemoryView &buffer) override;
+ virtual hailo_status flush() override;
+private:
+ InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+ EventPtr core_op_activated_event, hailo_status &output_status);
+};
+
+/*! In-process output vstream implementation; pulls frames from the local pipeline. */
+class OutputVStreamImpl : public OutputVStreamInternal
+{
+public:
+ static Expected<std::shared_ptr<OutputVStreamImpl>> create(
+ const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
+ EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
+ OutputVStreamImpl(OutputVStreamImpl &&) noexcept = default;
+ OutputVStreamImpl(const OutputVStreamImpl &) = delete;
+ OutputVStreamImpl &operator=(OutputVStreamImpl &&) noexcept = default;
+ OutputVStreamImpl &operator=(const OutputVStreamImpl &) = delete;
+ virtual ~OutputVStreamImpl();
+
+ // Overrides the pure-virtual OutputVStreamInternal::read; `override` added so the
+ // compiler enforces the base signature (was missing, unlike the rest of the file).
+ virtual hailo_status read(MemoryView buffer) override;
+
+ // Callback invoked when the vstream can no longer be read from.
+ void set_on_vstream_cant_read_callback(std::function<void()> callback)
+ {
+ m_cant_read_callback = callback;
+ }
+
+ // Callback invoked when the vstream becomes readable again.
+ void set_on_vstream_can_read_callback(std::function<void()> callback)
+ {
+ m_can_read_callback = callback;
+ }
+
+private:
+ OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+ std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
+ EventPtr core_op_activated_event, hailo_status &output_status);
+
+ std::function<void()> m_cant_read_callback;
+ std::function<void()> m_can_read_callback;
+};
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+/*! Multi-process input vstream: forwards all operations over RPC to the HailoRT service,
+ * identified by `m_handle`. Caches the user buffer format and vstream info locally. */
+class InputVStreamClient : public InputVStreamInternal
+{
+public:
+ static Expected<std::shared_ptr<InputVStreamClient>> create(uint32_t input_vstream_handle);
+ InputVStreamClient(InputVStreamClient &&) noexcept = default;
+ InputVStreamClient(const InputVStreamClient &) = delete;
+ InputVStreamClient &operator=(InputVStreamClient &&) noexcept = default;
+ InputVStreamClient &operator=(const InputVStreamClient &) = delete;
+ virtual ~InputVStreamClient();
+
+ virtual hailo_status write(const MemoryView &buffer) override;
+ virtual hailo_status flush() override;
+
+ virtual hailo_status abort() override;
+ virtual hailo_status resume() override;
+ virtual size_t get_frame_size() const override;
+ virtual const hailo_vstream_info_t &get_info() const override;
+ virtual const hailo_format_t &get_user_buffer_format() const override;
+ virtual std::string name() const override;
+ virtual std::string network_name() const override;
+ virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const override;
+ virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const override;
+ virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const override;
+ virtual AccumulatorPtr get_pipeline_latency_accumulator() const override;
+ virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const override;
+ // Fork hooks: the RPC client holds process-local state and must be handled across fork().
+ virtual hailo_status before_fork() override;
+ virtual hailo_status after_fork_in_parent() override;
+ virtual hailo_status after_fork_in_child() override;
+
+private:
+ InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format,
+ hailo_vstream_info_t &&info);
+ hailo_status create_client();
+
+ std::unique_ptr<HailoRtRpcClient> m_client;
+ uint32_t m_handle; // Service-side identifier of this vstream.
+ hailo_format_t m_user_buffer_format;
+ hailo_vstream_info_t m_info;
+};
+
+/*! Multi-process output vstream: forwards all operations over RPC to the HailoRT service,
+ * identified by `m_handle`. Caches the user buffer format and vstream info locally. */
+class OutputVStreamClient : public OutputVStreamInternal
+{
+public:
+ static Expected<std::shared_ptr<OutputVStreamClient>> create(uint32_t outputs_vstream_handle);
+ OutputVStreamClient(OutputVStreamClient &&) noexcept = default;
+ OutputVStreamClient(const OutputVStreamClient &) = delete;
+ OutputVStreamClient &operator=(OutputVStreamClient &&) noexcept = default;
+ OutputVStreamClient &operator=(const OutputVStreamClient &) = delete;
+ virtual ~OutputVStreamClient();
+
+ // Overrides the pure-virtual OutputVStreamInternal::read; `override` added so the
+ // compiler enforces the base signature (was missing, unlike the rest of the file).
+ virtual hailo_status read(MemoryView buffer) override;
+
+ virtual hailo_status abort() override;
+ virtual hailo_status resume() override;
+ virtual size_t get_frame_size() const override;
+ virtual const hailo_vstream_info_t &get_info() const override;
+ virtual const hailo_format_t &get_user_buffer_format() const override;
+ virtual std::string name() const override;
+ virtual std::string network_name() const override;
+ virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const override;
+ virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const override;
+ virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const override;
+ virtual AccumulatorPtr get_pipeline_latency_accumulator() const override;
+ virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const override;
+ // Fork hooks: the RPC client holds process-local state and must be handled across fork().
+ virtual hailo_status before_fork() override;
+ virtual hailo_status after_fork_in_parent() override;
+ virtual hailo_status after_fork_in_child() override;
+
+private:
+ OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
+ hailo_vstream_info_t &&info);
+
+ hailo_status create_client();
+
+ std::unique_ptr<HailoRtRpcClient> m_client;
+ uint32_t m_handle; // Service-side identifier of this vstream.
+ hailo_format_t m_user_buffer_format;
+ hailo_vstream_info_t m_info;
+};
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+/*! Pipeline element that transforms user-format input frames into the device (HW) format
+ * before inference, using an InputTransformContext and an internal buffer pool. */
+class PreInferElement : public FilterElement
+{
+public:
+ static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
+ const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+ hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ // Convenience overload that takes the sizes/flags from hailo_vstream_params_t.
+ static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
+ const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
+ const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+ virtual ~PreInferElement() = default;
+
+ virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+ virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+ virtual PipelinePad &next_pad() override;
+ virtual std::string description() const override;
+
+protected:
+ // Performs the actual input transformation on a single buffer.
+ virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+
+private:
+ std::unique_ptr<InputTransformContext> m_transform_context;
+ BufferPoolPtr m_pool; // Pool of destination (HW-format) buffers.
+ std::chrono::milliseconds m_timeout;
+};
+
+/*! Pipeline element that transforms device (HW) format output frames into the
+ * user-requested format after inference, using an OutputTransformContext. */
+class PostInferElement : public FilterElement
+{
+public:
+ static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+ const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
+ hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ // Convenience overload that takes the flags from hailo_vstream_params_t.
+ static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
+ const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
+ DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+ virtual ~PostInferElement() = default;
+ virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+ virtual PipelinePad &next_pad() override;
+ virtual std::string description() const override;
+
+protected:
+ // Performs the actual output transformation on a single buffer.
+ virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+
+private:
+ std::unique_ptr<OutputTransformContext> m_transform_context;
+};
+
+/*! Mux element that gathers buffers from multiple sinks and runs a software NMS
+ * post-process op (net_flow::Op) over them, producing one fused output buffer. */
+class NmsPostProcessMuxElement : public BaseMuxElement
+{
+public:
+ static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(std::shared_ptr<net_flow::Op> nms_op,
+ hailo_nms_info_t nms_info, const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+ hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+ std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ // Convenience overload that takes the sizes/flags from hailo_vstream_params_t.
+ static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(std::shared_ptr<net_flow::Op> nms_op,
+ hailo_nms_info_t nms_info, const std::string &name, const hailo_vstream_params_t &vstream_params,
+ EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ NmsPostProcessMuxElement(std::shared_ptr<net_flow::Op> nms_op, BufferPoolPtr &&pool, const std::string &name,
+ std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+
+ virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+ // Registers the name of the stream feeding a sink, in registration order. // TODO: remove this (HRT-8875)
+ void add_sink_name(const std::string &name)
+ {
+ m_sinks_names.push_back(name);
+ }
+
+protected:
+ // Runs the NMS op over all gathered inputs and emits the fused result.
+ virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) override;
+
+private:
+ std::shared_ptr<net_flow::Op> m_nms_op;
+ BufferPoolPtr m_pool; // Pool of output buffers for the fused result.
+ std::vector<std::string> m_sinks_names; // TODO: remove this (HRT-8875)
+};
+
+/*! Mux element that fuses the NMS outputs of several streams (described by `nms_infos`)
+ * into a single buffer described by the fused hailo_nms_info_t. */
+class NmsMuxElement : public BaseMuxElement
+{
+public:
+ static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos,
+ const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+ hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ // Convenience overload that takes the sizes/flags from hailo_vstream_params_t.
+ static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
+ const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool, const std::string &name,
+ std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+ // NMS info describing the fused output of all muxed streams.
+ const hailo_nms_info_t &get_fused_nms_info() const;
+
+ virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+
+protected:
+ // Fuses the per-stream NMS buffers into a single output buffer.
+ virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) override;
+
+private:
+ std::vector<hailo_nms_info_t> m_nms_infos;
+ hailo_nms_info_t m_fused_nms_info;
+ BufferPoolPtr m_pool;
+};
+
+/*! Demux element that splits a single muxed output buffer into multiple per-layer
+ * buffers using an OutputDemuxer, with one buffer pool per demuxed output. */
+class TransformDemuxElement : public BaseDemuxElement
+{
+public:
+ static Expected<std::shared_ptr<TransformDemuxElement>> create(std::shared_ptr<OutputDemuxer> demuxer,
+ const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+ hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools, const std::string &name,
+ std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+
+ virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+
+protected:
+ // Splits one input buffer into a buffer per demuxed output.
+ virtual Expected<std::vector<PipelineBuffer>> action(PipelineBuffer &&input) override;
+
+private:
+ std::shared_ptr<OutputDemuxer> m_demuxer;
+ std::vector<BufferPoolPtr> m_pools; // One pool per demuxed output.
+};
+
+/*! Source element that reads raw frames from a physical OutputStream into pool buffers,
+ * optionally applying an output transformation, and feeds them into the pipeline.
+ * Also implements the pipeline lifecycle hooks (activate/deactivate/abort/...). */
+class HwReadElement : public SourceElement
+{
+public:
+ // NOTE: the defaulted parameter was renamed from `m_transform_context` to
+ // `transform_context` — the `m_` prefix is reserved for members and was misleading
+ // in a parameter declaration (the constructor below already uses the correct name).
+ static Expected<std::shared_ptr<HwReadElement>> create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
+ size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+ std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> transform_context = nullptr);
+ HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name, std::chrono::milliseconds timeout,
+ DurationCollector &&duration_collector, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+ BufferPoolPtr transform_pool = nullptr, std::unique_ptr<OutputTransformContext> transform_context = nullptr);
+ virtual ~HwReadElement() = default;
+
+ virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
+
+ virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+ virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+ // Pipeline lifecycle hooks.
+ virtual hailo_status execute_activate() override;
+ virtual hailo_status execute_deactivate() override;
+ virtual hailo_status execute_post_deactivate() override;
+ virtual hailo_status execute_clear() override;
+ virtual hailo_status execute_flush() override;
+ virtual hailo_status execute_abort() override;
+ virtual hailo_status execute_resume() override;
+ virtual hailo_status execute_wait_for_finish() override;
+ uint32_t get_invalid_frames_count();
+ virtual std::string description() const override;
+
+private:
+ std::shared_ptr<OutputStream> m_stream;
+ BufferPoolPtr m_pool; // Buffers the stream is read into.
+ BufferPoolPtr m_transform_pool; // Destination buffers when a transformation is applied.
+ std::chrono::milliseconds m_timeout;
+ EventPtr m_shutdown_event;
+ WaitOrShutdown m_activation_wait_or_shutdown;
+ std::unique_ptr<OutputTransformContext> m_transform_context; // Optional; may be null.
+};
+
+/*! Sink element that writes frames from the pipeline into a physical InputStream.
+ * Also implements the pipeline lifecycle hooks (activate/deactivate/abort/...). */
+class HwWriteElement : public SinkElement
+{
+public:
+ static Expected<std::shared_ptr<HwWriteElement>> create(std::shared_ptr<InputStream> stream, const std::string &name,
+ hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
+ std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event);
+ virtual ~HwWriteElement() = default;
+
+ virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+ virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+ // Pipeline lifecycle hooks.
+ virtual hailo_status execute_activate() override;
+ virtual hailo_status execute_deactivate() override;
+ virtual hailo_status execute_post_deactivate() override;
+ virtual hailo_status execute_clear() override;
+ virtual hailo_status execute_flush() override;
+ virtual hailo_status execute_abort() override;
+ virtual hailo_status execute_resume() override;
+ virtual hailo_status execute_wait_for_finish() override;
+ virtual std::string description() const override;
+
+private:
+ std::shared_ptr<InputStream> m_stream;
+ EventPtr m_got_flush_event; // Signaled when a flush request reaches this sink.
+};
+
+/*! Filter element whose action copies the input buffer (no format transformation). */
+class CopyBufferElement : public FilterElement
+{
+public:
+ static Expected<std::shared_ptr<CopyBufferElement>> create(const std::string &name, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ CopyBufferElement(const std::string &name, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+ virtual ~CopyBufferElement() = default;
+ virtual PipelinePad &next_pad() override;
+
+protected:
+ // Copies `input` into the output buffer.
+ virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+};
+
+/*! Static helpers for constructing input/output vstream pipelines from physical streams:
+ * plain input/output pipelines, demuxed outputs, fused-NMS outputs, and software
+ * NMS post-process outputs. */
+class VStreamsBuilderUtils
+{
+public:
+ static Expected<std::vector<InputVStream>> create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &input_vstream_infos,
+ const hailo_vstream_params_t &vstreams_params);
+ static Expected<std::vector<OutputVStream>> create_outputs(std::shared_ptr<OutputStream> output_stream,
+ NameToVStreamParamsMap &vstreams_params_map, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+ // Wrap internal vstream objects in their public-facing types.
+ static InputVStream create_input(std::shared_ptr<InputVStreamInternal> input_vstream);
+ static OutputVStream create_output(std::shared_ptr<OutputVStreamInternal> output_vstream);
+ // Output vstream fusing several NMS streams (HW fuse).
+ static Expected<std::vector<OutputVStream>> create_output_nms(OutputStreamPtrVector &output_streams,
+ hailo_vstream_params_t vstreams_params,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+ // Output vstream running a software NMS post-process op over several streams.
+ static Expected<std::vector<OutputVStream>> create_output_post_process_nms(OutputStreamPtrVector &output_streams,
+ hailo_vstream_params_t vstreams_params,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
+ const NetFlowElement &nms_op);
+ static hailo_status add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
+ std::vector<std::shared_ptr<PipelineElement>> &&elements, std::vector<OutputVStream> &vstreams,
+ std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+ static hailo_status add_nms_fuse(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
+ std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
+ EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+ static hailo_status add_nms_post_process(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
+ std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
+ EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+ const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
+ const NetFlowElement &nms_op);
+ static Expected<AccumulatorPtr> create_pipeline_latency_accumulator(const hailo_vstream_params_t &vstreams_params);
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VSTREAM_INTERNAL_HPP_ */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+# Source files of the network_group component.
+set(SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/network_group.cpp
+)
+
+# Propagate the sources to the parent directory's list so the library target picks them up.
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file network_group.cpp
+ * @brief: Configured Network Group and Activated Network Group
+ **/
+
+#include "hailo/transform.hpp"
+#include "hailo/vstream.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/utils.hpp"
+#include "common/runtime_statistics_internal.hpp"
+
+#include "network_group/network_group_internal.hpp"
+#include "hef/hef_internal.hpp"
+#include "eth/eth_stream.hpp"
+#include "vdma/vdma_stream.hpp"
+#include "mipi/mipi_stream.hpp"
+#include "device_common/control.hpp"
+#include "net_flow/pipeline/vstream_internal.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+
+namespace hailort
+{
+
+// Activates the network group with the library's default activation parameters.
+Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroup::activate()
+{
+    return activate(HailoRTDefaults::get_active_network_group_params());
+}
+
+// The methods below forward to the single contained CoreOp (see get_core_op(),
+// which asserts m_core_ops holds exactly one element).
+Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate(
+    const hailo_activate_network_group_params_t &network_group_params)
+{
+    return get_core_op()->activate(network_group_params);
+}
+
+/* Network group base functions */
+Expected<LatencyMeasurementResult> ConfiguredNetworkGroupBase::get_latency_measurement(const std::string &network_name)
+{
+    return get_core_op()->get_latency_measurement(network_name);
+}
+
+Expected<OutputStreamWithParamsVector> ConfiguredNetworkGroupBase::get_output_streams_from_vstream_names(
+    const std::map<std::string, hailo_vstream_params_t> &outputs_params)
+{
+    return get_core_op()->get_output_streams_from_vstream_names(outputs_params);
+}
+
+Expected<OutputStreamPtrVector> ConfiguredNetworkGroupBase::get_output_streams_by_vstream_name(const std::string &name)
+{
+    return get_core_op()->get_output_streams_by_vstream_name(name);
+}
+
+Expected<LayerInfo> ConfiguredNetworkGroupBase::get_layer_info(const std::string &stream_name)
+{
+    return get_core_op()->get_layer_info(stream_name);
+}
+
+// Takes ownership of the core-ops and net-flow (post-process) ops that make up this
+// network group; the configure params are copied.
+ConfiguredNetworkGroupBase::ConfiguredNetworkGroupBase(
+    const ConfigureNetworkParams &config_params, std::vector<std::shared_ptr<CoreOp>> &&core_ops,
+    std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops) :
+    m_config_params(config_params),
+    m_core_ops(std::move(core_ops)),
+    m_net_flow_ops(std::move(net_flow_ops))
+{}
+
+// static func
+uint16_t ConfiguredNetworkGroupBase::get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params)
+{
+ // There are two possible situations:
+ // 1) All networks in the network group have the same configured (and hence smallest) batch_size =>
+ // We return that batch size.
+ // 2) Not all of the networks have the same configured (and hence smallest) batch_size. Currently, when
+ // using dynamic_batch_sizes, all networks will use the same dynamic_batch_size (until HRT-6535 is done).
+ // Hence, we must not set a dynamic_batch_size to a value greater than the smallest configured network
+ // batch_size (e.g. all the resources allocated are for at most the configured network batch_size).
+
+ /* We iterate over all network's batch_sizes to get the non-default min.
+ Ignoring HAILO_DEFAULT_BATCH_SIZE as it is not a real batch-value,
+ but indicating the scheduler should optimize batches by himself */
+ uint16_t min_batch_size = UINT16_MAX;
+ for (const auto &network_params_pair : config_params.network_params_by_name) {
+ if ((HAILO_DEFAULT_BATCH_SIZE != network_params_pair.second.batch_size) &&
+ (network_params_pair.second.batch_size < min_batch_size)) {
+ min_batch_size = network_params_pair.second.batch_size;
+ }
+ }
+ return (UINT16_MAX == min_batch_size) ? DEFAULT_ACTUAL_BATCH_SIZE : min_batch_size;
+}
+
+Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate_with_batch(uint16_t dynamic_batch_size,
+    bool resume_pending_stream_transfers)
+{
+    return get_core_op()->activate_with_batch(dynamic_batch_size, resume_pending_stream_transfers);
+}
+
+// Both name accessors currently return the (single) core-op's name.
+const std::string &ConfiguredNetworkGroupBase::get_network_group_name() const
+{
+    return get_core_op_metadata()->core_op_name();
+}
+
+const std::string &ConfiguredNetworkGroupBase::name() const
+{
+    return get_core_op_metadata()->core_op_name();
+}
+
+hailo_status ConfiguredNetworkGroupBase::activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    return get_core_op()->activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
+}
+
+hailo_status ConfiguredNetworkGroupBase::deactivate_low_level_streams()
+{
+    return get_core_op()->deactivate_low_level_streams();
+}
+
+// Assumes the network group currently wraps exactly one core-op (asserted below).
+std::shared_ptr<CoreOp> ConfiguredNetworkGroupBase::get_core_op() const
+{
+    assert(m_core_ops.size() == 1);
+    return m_core_ops[0];
+}
+
+const std::shared_ptr<CoreOpMetadata> ConfiguredNetworkGroupBase::get_core_op_metadata() const
+{
+    assert(m_core_ops.size() == 1);
+    return m_core_ops[0]->metadata();
+}
+
+// All of the following are thin forwarders to the single contained CoreOp.
+Expected<uint16_t> ConfiguredNetworkGroupBase::get_stream_batch_size(const std::string &stream_name)
+{
+    return get_core_op()->get_stream_batch_size(stream_name);
+}
+
+bool ConfiguredNetworkGroupBase::is_multi_context() const
+{
+    return get_core_op()->is_multi_context();
+}
+
+const ConfigureNetworkParams ConfiguredNetworkGroupBase::get_config_params() const
+{
+    return get_core_op()->get_config_params();
+}
+
+Expected<std::vector<std::string>> ConfiguredNetworkGroupBase::get_vstream_names_from_stream_name(const std::string &stream_name)
+{
+    return get_core_op()->get_vstream_names_from_stream_name(stream_name);
+}
+
+const SupportedFeatures &ConfiguredNetworkGroupBase::get_supported_features()
+{
+    return get_core_op()->get_supported_features();
+}
+
+// Stream creation forwarders - the core-op owns the actual stream objects.
+hailo_status ConfiguredNetworkGroupBase::create_input_stream_from_config_params(Device &device,
+    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
+{
+    return get_core_op()->create_input_stream_from_config_params(device, stream_params, stream_name);
+}
+
+hailo_status ConfiguredNetworkGroupBase::create_vdma_input_stream(Device &device, const std::string &stream_name,
+    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
+{
+    return get_core_op()->create_vdma_input_stream(device, stream_name, layer_info, stream_params);
+}
+
+hailo_status ConfiguredNetworkGroupBase::create_output_stream_from_config_params(Device &device,
+    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
+{
+    return get_core_op()->create_output_stream_from_config_params(device, stream_params, stream_name);
+}
+
+hailo_status ConfiguredNetworkGroupBase::create_vdma_output_stream(Device &device, const std::string &stream_name,
+    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
+{
+    return get_core_op()->create_vdma_output_stream(device, stream_name, layer_info, stream_params);
+}
+
+hailo_status ConfiguredNetworkGroupBase::create_streams_from_config_params(Device &device)
+{
+    return get_core_op()->create_streams_from_config_params(device);
+}
+
+// Stream lookup forwarders.
+Expected<InputStreamRefVector> ConfiguredNetworkGroupBase::get_input_streams_by_network(const std::string &network_name)
+{
+    return get_core_op()->get_input_streams_by_network(network_name);
+}
+
+Expected<OutputStreamRefVector> ConfiguredNetworkGroupBase::get_output_streams_by_network(const std::string &network_name)
+{
+    return get_core_op()->get_output_streams_by_network(network_name);
+}
+
+InputStreamRefVector ConfiguredNetworkGroupBase::get_input_streams()
+{
+    return get_core_op()->get_input_streams();
+}
+
+OutputStreamRefVector ConfiguredNetworkGroupBase::get_output_streams()
+{
+    return get_core_op()->get_output_streams();
+}
+
+ExpectedRef<InputStream> ConfiguredNetworkGroupBase::get_input_stream_by_name(const std::string& name)
+{
+    return get_core_op()->get_input_stream_by_name(name);
+}
+
+ExpectedRef<OutputStream> ConfiguredNetworkGroupBase::get_output_stream_by_name(const std::string& name)
+{
+    return get_core_op()->get_output_stream_by_name(name);
+}
+
+std::vector<std::reference_wrapper<InputStream>> ConfiguredNetworkGroupBase::get_input_streams_by_interface(
+    hailo_stream_interface_t stream_interface)
+{
+    return get_core_op()->get_input_streams_by_interface(stream_interface);
+}
+
+std::vector<std::reference_wrapper<OutputStream>> ConfiguredNetworkGroupBase::get_output_streams_by_interface(
+    hailo_stream_interface_t stream_interface)
+{
+    return get_core_op()->get_output_streams_by_interface(stream_interface);
+}
+
+hailo_status ConfiguredNetworkGroupBase::wait_for_activation(const std::chrono::milliseconds &timeout)
+{
+    return get_core_op()->wait_for_activation(timeout);
+}
+
+// Vstream params/info forwarders.
+Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupBase::get_output_vstream_groups()
+{
+    return get_core_op()->get_output_vstream_groups();
+}
+
+Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroupBase::make_output_vstream_params_groups(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    return get_core_op()->make_output_vstream_params_groups(quantized, format_type, timeout_ms, queue_size);
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupBase::make_input_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    return get_core_op()->make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupBase::make_output_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    return get_core_op()->make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
+}
+
+Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroupBase::get_network_infos() const
+{
+    return get_core_op()->get_network_infos();
+}
+
+Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroupBase::get_all_stream_infos(
+    const std::string &network_name) const
+{
+    return get_core_op()->get_all_stream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_input_vstream_infos(
+    const std::string &network_name) const
+{
+    return get_core_op()->get_input_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_output_vstream_infos(
+    const std::string &network_name) const
+{
+    return get_core_op()->get_output_vstream_infos(network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupBase::get_all_vstream_infos(
+    const std::string &network_name) const
+{
+    return get_core_op()->get_all_vstream_infos(network_name);
+}
+
+AccumulatorPtr ConfiguredNetworkGroupBase::get_activation_time_accumulator() const
+{
+    return get_core_op()->get_activation_time_accumulator();
+}
+
+AccumulatorPtr ConfiguredNetworkGroupBase::get_deactivation_time_accumulator() const
+{
+    return get_core_op()->get_deactivation_time_accumulator();
+}
+
+// Returns a copy of 'vstream_params' in which any AUTO fields of the user buffer
+// format are resolved against the underlying stream's format.
+static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_info_t &stream_info,
+    const hailo_vstream_params_t &vstream_params)
+{
+    hailo_vstream_params_t expanded_params = vstream_params;
+    expanded_params.user_buffer_format =
+        HailoRTDefaults::expand_auto_format(expanded_params.user_buffer_format, stream_info.format);
+    return expanded_params;
+}
+
+// Builds a name -> info lookup map from a vector of vstream infos.
+// On duplicate names the first occurrence wins (insert does not overwrite).
+static std::map<std::string, hailo_vstream_info_t> vstream_infos_vector_to_map(std::vector<hailo_vstream_info_t> &&vstream_info_vector)
+{
+    std::map<std::string, hailo_vstream_info_t> result;
+    for (const auto &info : vstream_info_vector) {
+        result.insert(std::make_pair(std::string(info.name), info));
+    }
+
+    return result;
+}
+
+// Builds one input-vstream pipeline per entry in 'inputs_params' (keyed by vstream name).
+// Fails with HAILO_NOT_FOUND if a requested name has no matching vstream info.
+Expected<std::vector<InputVStream>> ConfiguredNetworkGroupBase::create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params)
+{
+    auto input_vstream_infos = get_input_vstream_infos();
+    CHECK_EXPECTED(input_vstream_infos);
+    auto input_vstream_infos_map = vstream_infos_vector_to_map(input_vstream_infos.release());
+
+    std::vector<InputVStream> vstreams;
+    vstreams.reserve(inputs_params.size());
+    for (const auto &name_params_pair : inputs_params) {
+        auto input_stream_expected = get_shared_input_stream_by_name(name_params_pair.first);
+        CHECK_EXPECTED(input_stream_expected);
+        auto input_stream = input_stream_expected.release();
+
+        const auto vstream_info = input_vstream_infos_map.find(name_params_pair.first);
+        CHECK_AS_EXPECTED(vstream_info != input_vstream_infos_map.end(), HAILO_NOT_FOUND,
+            "Failed to find vstream info of {}", name_params_pair.first);
+
+        // Resolve AUTO format fields against the underlying stream before building the pipeline.
+        const auto vstream_params = expand_vstream_params_autos(input_stream->get_info(), name_params_pair.second);
+        auto inputs = VStreamsBuilderUtils::create_inputs(input_stream, vstream_info->second, vstream_params);
+        CHECK_EXPECTED(inputs);
+
+        vstreams.insert(vstreams.end(), std::make_move_iterator(inputs->begin()), std::make_move_iterator(inputs->end()));
+    }
+    return vstreams;
+}
+
+// Builds the output-vstream pipelines for every entry in 'outputs_params'.
+// Streams belonging to a defused NMS layer, or feeding a host-side post-process op,
+// are grouped first and built together; all other streams get a regular pipeline.
+Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupBase::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
+{
+    std::vector<OutputVStream> vstreams;
+    vstreams.reserve(outputs_params.size());
+    auto output_streams = get_output_streams_from_vstream_names(outputs_params);
+    CHECK_EXPECTED(output_streams);
+
+    auto output_vstream_infos = get_output_vstream_infos();
+    CHECK_EXPECTED(output_vstream_infos);
+    auto output_vstream_infos_map = vstream_infos_vector_to_map(output_vstream_infos.release());
+
+    // We iterate through all output streams, and if they are nms, we collect them together by their original stream name.
+    // We need this step because all nms output streams of the same original stream need to be fused together
+
+    std::unordered_map<std::string, std::shared_ptr<NetFlowElement>> post_process_nms_ops;
+    std::set<std::string> post_process_stream_inputs;
+    for (auto &op : m_net_flow_ops) {
+        post_process_nms_ops.insert({op->name, op});
+        post_process_stream_inputs.insert(op->input_streams.begin(), op->input_streams.end());
+    }
+    std::map<std::string, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>> nms_op_output_streams;
+    std::map<std::string, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>> nms_output_streams;
+    for (auto &stream_params_pair : output_streams.value()) {
+        // Hoisted - every branch below reads the same stream info several times.
+        const auto &stream_info = stream_params_pair.first->get_info();
+        if ((HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order && stream_info.nms_info.is_defused) &&
+            (outputs_params.end() != outputs_params.find(stream_info.nms_info.defuse_info.original_name))) {
+            // Explicit std::string copy so the key stays valid independently of stream_info's lifetime.
+            const std::string original_name = stream_info.nms_info.defuse_info.original_name;
+            // emplace is a no-op when the key already exists; either way 'first' refers to the entry,
+            // avoiding a second lookup via operator[].
+            auto nms_emplace_result = nms_output_streams.emplace(original_name, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>(
+                OutputStreamPtrVector(), outputs_params.at(original_name)));
+            nms_emplace_result.first->second.first.push_back(stream_params_pair.first);
+        } else if (post_process_stream_inputs.count(stream_info.name)) {
+            // This stream feeds a host-side post-process op; group it under the op's name.
+            for (auto &op : m_net_flow_ops) {
+                if (op->input_streams.count(stream_info.name)) {
+                    assert(op->op->outputs_metadata().size() == 1);
+                    auto op_emplace_result = nms_op_output_streams.emplace(op->name, std::pair<OutputStreamPtrVector, hailo_vstream_params_t>(
+                        OutputStreamPtrVector(), outputs_params.at(op->op->outputs_metadata().begin()->first)));
+                    op_emplace_result.first->second.first.push_back(stream_params_pair.first);
+                }
+            }
+        } else {
+            // Regular (non-NMS) output stream - build its vstreams directly.
+            auto outputs = VStreamsBuilderUtils::create_outputs(stream_params_pair.first, stream_params_pair.second, output_vstream_infos_map);
+            CHECK_EXPECTED(outputs);
+            vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
+        }
+    }
+    // Fuse each group of defused NMS streams into a single vstream.
+    for (auto &nms_output_stream_pair : nms_output_streams) {
+        auto outputs = VStreamsBuilderUtils::create_output_nms(nms_output_stream_pair.second.first, nms_output_stream_pair.second.second,
+            output_vstream_infos_map);
+        CHECK_EXPECTED(outputs);
+        vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
+    }
+    // Build the host-side post-process NMS pipelines.
+    for (auto &nms_output_stream_pair : nms_op_output_streams) {
+        auto op = post_process_nms_ops.at(nms_output_stream_pair.first);
+        auto outputs = VStreamsBuilderUtils::create_output_post_process_nms(nms_output_stream_pair.second.first,
+            nms_output_stream_pair.second.second, output_vstream_infos_map,
+            *op);
+        CHECK_EXPECTED(outputs);
+        vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
+    }
+
+    get_core_op()->set_vstreams_multiplexer_callbacks(vstreams);
+
+    return vstreams;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file network_group_internal.hpp
+ * @brief TODO: HRT-9547 - Change doc after moving NGs to global context + add explanation on legacy names
+ * Class declaration for ConfiguredNetworkGroupBase and ActivatedCoreOp that implement the basic ConfiguredNetworkGroup
+ * and ActivatedNetworkGroup interfaces. All internal classes that are relevant should inherit from the
+ * ConfiguredNetworkGroupBase and ActivatedCoreOp classes.
+ * Hence, the hierarchy is as follows:
+ * --------------------------------------------------------------------------------------------------------------
+ * | ConfiguredNetworkGroup | (External "interface")
+ * | ________________________________|___________________________ |
+ * | / \ |
+ * | ConfiguredNetworkGroupBase ConfiguredNetworkGroupClient | (Base classes)
+ * | | |
+ * | | |
+ * | vector of CoreOps | (Actual implementations)
+ * -------------------------------------------------------------------------------------------------------------|
+ * | ActivatedNetworkGroup | (External "interface")
+ * | | |
+ * | ActivatedCoreOp | (Base classes)
+ * | __________________|_____________________________________________________ |
+ * | / | \ |
+ * | VdmaConfigActivatedCoreOp VDeviceActivatedCoreOp HcpConfigActivatedCoreOp | (Actual implementations)
+ * | | |
+ * | vector of VdmaConfigActivatedCoreOp |
+ * --------------------------------------------------------------------------------------------------------------
+ **/
+
+#ifndef _HAILO_NETWORK_GROUP_INTERNAL_HPP_
+#define _HAILO_NETWORK_GROUP_INTERNAL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/network_group.hpp"
+
+#include "common/latency_meter.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+#include "core_op/active_core_op_holder.hpp"
+#include "core_op/core_op.hpp"
+
+#include "control_protocol.h"
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "service/hailort_rpc_client.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+
+namespace hailort
+{
+
+// In-process implementation of ConfiguredNetworkGroup. Wraps the configured core-ops
+// (currently exactly one - see get_core_op()) and mostly forwards calls to it.
+class ConfiguredNetworkGroupBase : public ConfiguredNetworkGroup
+{
+public:
+    static Expected<std::shared_ptr<ConfiguredNetworkGroupBase>> create(const ConfigureNetworkParams &config_params,
+        std::vector<std::shared_ptr<CoreOp>> &&core_ops, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops)
+    {
+        // make_shared_nothrow cannot be used here because the constructor is private.
+        auto net_group_ptr = std::shared_ptr<ConfiguredNetworkGroupBase>(new (std::nothrow)
+            ConfiguredNetworkGroupBase(config_params, std::move(core_ops), std::move(net_flow_ops)));
+        CHECK_NOT_NULL_AS_EXPECTED(net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+        return net_group_ptr;
+    }
+
+    virtual ~ConfiguredNetworkGroupBase() = default;
+    ConfiguredNetworkGroupBase(const ConfiguredNetworkGroupBase &other) = delete;
+    ConfiguredNetworkGroupBase &operator=(const ConfiguredNetworkGroupBase &other) = delete;
+    ConfiguredNetworkGroupBase &operator=(ConfiguredNetworkGroupBase &&other) = delete;
+    ConfiguredNetworkGroupBase(ConfiguredNetworkGroupBase &&other) = default;
+
+    Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_with_batch(
+        uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
+        bool resume_pending_stream_transfers = false);
+    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(
+        const hailo_activate_network_group_params_t &network_group_params) override;
+    virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override;
+
+    virtual const std::string &get_network_group_name() const override;
+    virtual const std::string &name() const override;
+
+    virtual Expected<InputStreamRefVector> get_input_streams_by_network(const std::string &network_name="") override;
+    virtual Expected<OutputStreamRefVector> get_output_streams_by_network(const std::string &network_name="") override;
+    virtual InputStreamRefVector get_input_streams() override;
+    virtual OutputStreamRefVector get_output_streams() override;
+    virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface) override;
+    virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface) override;
+    virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string& name) override;
+    virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string& name) override;
+    virtual Expected<OutputStreamWithParamsVector> get_output_streams_from_vstream_names(
+        const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
+    virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="") override;
+
+    virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name="") override;
+    virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name="") override;
+
+    virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size) override;
+
+    virtual Expected<std::vector<std::vector<std::string>>> get_output_vstream_groups() override;
+
+    virtual Expected<std::vector<hailo_network_info_t>> get_network_infos() const override;
+    virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const override;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name="") const override;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name="") const override;
+    virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const override;
+    virtual AccumulatorPtr get_activation_time_accumulator() const override;
+    virtual AccumulatorPtr get_deactivation_time_accumulator() const override;
+    hailo_status create_streams_from_config_params(Device &device);
+
+    virtual bool is_multi_context() const override;
+    virtual const ConfigureNetworkParams get_config_params() const override;
+
+    // TODO: HRT-9551 - Change to get_core_op_by_name() when multiple core_ops supported
+    std::shared_ptr<CoreOp> get_core_op() const;
+    // TODO: HRT-9546 Remove
+    const std::shared_ptr<CoreOpMetadata> get_core_op_metadata() const;
+
+    Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name);
+    const SupportedFeatures &get_supported_features();
+
+    Expected<uint16_t> get_stream_batch_size(const std::string &stream_name);
+
+    virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params) override;
+    virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
+
+    // Inline forwarders to the single core-op.
+    Expected<std::shared_ptr<InputStream>> get_shared_input_stream_by_name(const std::string &stream_name)
+    {
+        return get_core_op()->get_shared_input_stream_by_name(stream_name);
+    }
+
+    Expected<std::shared_ptr<OutputStream>> get_shared_output_stream_by_name(const std::string &stream_name)
+    {
+        return get_core_op()->get_shared_output_stream_by_name(stream_name);
+    }
+
+    EventPtr get_core_op_activated_event()
+    {
+        return get_core_op()->m_core_op_activated_event;
+    }
+
+    hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers = false)
+    {
+        return get_core_op()->activate_impl(dynamic_batch_size, resume_pending_stream_transfers);
+    }
+
+    hailo_status deactivate_impl(bool keep_nn_config_during_reset)
+    {
+        return get_core_op()->deactivate_impl(keep_nn_config_during_reset);
+    }
+
+    Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
+        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+    {
+        return get_core_op()->create_activated_network_group(network_group_params, dynamic_batch_size, resume_pending_stream_transfers);
+    }
+
+    Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters()
+    {
+        return get_core_op()->get_latency_meters();
+    }
+
+    Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
+    {
+        return get_core_op()->get_boundary_vdma_channel_by_stream_name(stream_name);
+    }
+
+    virtual bool is_scheduled() const override
+    {
+        return get_core_op()->is_scheduled();
+    }
+
+    virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override
+    {
+        return get_core_op()->set_scheduler_timeout(timeout, network_name);
+    }
+
+    virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override
+    {
+        return get_core_op()->set_scheduler_threshold(threshold, network_name);
+    }
+
+    virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override
+    {
+        return get_core_op()->get_default_streams_interface();
+    }
+
+    virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override
+    {
+        return get_core_op()->set_scheduler_priority(priority, network_name);
+    }
+
+    std::vector<std::shared_ptr<CoreOp>> &get_core_ops()
+    {
+        return m_core_ops;
+    }
+
+private:
+    ConfiguredNetworkGroupBase(const ConfigureNetworkParams &config_params,
+        std::vector<std::shared_ptr<CoreOp>> &&core_ops, std::vector<std::shared_ptr<NetFlowElement>> &&net_flow_ops);
+
+    static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
+    hailo_status create_vdma_input_stream(Device &device, const std::string &stream_name,
+        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
+    hailo_status create_vdma_output_stream(Device &device, const std::string &stream_name,
+        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
+    hailo_status create_output_stream_from_config_params(Device &device,
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+    hailo_status create_input_stream_from_config_params(Device &device,
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+    hailo_status add_mux_streams_by_edges_names(OutputStreamWithParamsVector &result,
+        const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params);
+    Expected<OutputStreamPtrVector> get_output_streams_by_vstream_name(const std::string &name);
+    Expected<LayerInfo> get_layer_info(const std::string &stream_name);
+
+    hailo_status activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers);
+    hailo_status deactivate_low_level_streams();
+
+    const ConfigureNetworkParams m_config_params;
+    std::vector<std::shared_ptr<CoreOp>> m_core_ops;
+    std::vector<std::shared_ptr<NetFlowElement>> m_net_flow_ops;
+
+    friend class VDeviceCoreOp;
+    friend class VDeviceActivatedCoreOp;
+};
+
+// TODO: Move ConfiguredNetworkGroupClient to a separate header
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+class ConfiguredNetworkGroupClient : public ConfiguredNetworkGroup
+{
+public:
+ ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle);
+
+ virtual ~ConfiguredNetworkGroupClient();
+ ConfiguredNetworkGroupClient(const ConfiguredNetworkGroupClient &other) = delete;
+ ConfiguredNetworkGroupClient &operator=(const ConfiguredNetworkGroupClient &other) = delete;
+ ConfiguredNetworkGroupClient &operator=(ConfiguredNetworkGroupClient &&other) = delete;
+ ConfiguredNetworkGroupClient(ConfiguredNetworkGroupClient &&other) = default;
+
+ virtual const std::string &get_network_group_name() const override;
+ virtual const std::string &name() const override;
+ virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
+ virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface) override;
+ virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface) override;
+ virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string &name) override;
+ virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string &name) override;
+ virtual Expected<InputStreamRefVector> get_input_streams_by_network(const std::string &network_name="") override;
+ virtual Expected<OutputStreamRefVector> get_output_streams_by_network(const std::string &network_name="") override;
+ virtual InputStreamRefVector get_input_streams() override;
+ virtual OutputStreamRefVector get_output_streams() override;
+ virtual Expected<OutputStreamWithParamsVector> get_output_streams_from_vstream_names(
+ const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
+
+ virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="") override;
+ virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(const hailo_activate_network_group_params_t &network_group_params) override;
+ virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override;
+
+ virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
+ bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+ const std::string &network_name="") override;
+ virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
+ bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+ const std::string &network_name="") override;
+ virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
+ bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size) override;
+ virtual Expected<std::vector<std::vector<std::string>>> get_output_vstream_groups() override;
+
+ virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const override;
+ virtual Expected<std::vector<hailo_network_info_t>> get_network_infos() const override;
+ virtual Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name="") const override;
+ virtual Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name="") const override;
+ virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const override;
+
+ virtual bool is_scheduled() const override;
+ virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
+ virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
+ virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
+
+ virtual AccumulatorPtr get_activation_time_accumulator() const override;
+ virtual AccumulatorPtr get_deactivation_time_accumulator() const override;
+
+ virtual bool is_multi_context() const override;
+ virtual const ConfigureNetworkParams get_config_params() const override;
+
+ virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params);
+ virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params);
+
+ virtual hailo_status before_fork() override;
+ virtual hailo_status after_fork_in_parent() override;
+ virtual hailo_status after_fork_in_child() override;
+
+private:
+ hailo_status create_client();
+
+ std::unique_ptr<HailoRtRpcClient> m_client;
+ uint32_t m_handle;
+ std::string m_network_group_name;
+};
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+} /* namespace hailort */
+
+#endif /* _HAILO_NETWORK_GROUP_INTERNAL_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_client.cpp
- * @brief: Network group client object
- **/
-
-#include "context_switch/network_group_internal.hpp"
-#include "common/utils.hpp"
-#include "hailort_defaults.hpp"
-#include "hailo/vstream.hpp"
-#include "vstream_internal.hpp"
-
-namespace hailort
-{
-
-ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle) :
- m_client(std::move(client)),
- m_handle(handle)
-{
- auto reply = m_client->ConfiguredNetworkGroup_name(m_handle);
- if (!reply) {
- LOGGER__ERROR("get_network_group_name failed with status {}", reply.status());
- return;
- }
- m_network_group_name = reply.value();
-}
-
-ConfiguredNetworkGroupClient::~ConfiguredNetworkGroupClient()
-{
- auto reply = m_client->ConfiguredNetworkGroup_release(m_handle);
- if (reply != HAILO_SUCCESS) {
- LOGGER__CRITICAL("ConfiguredNetworkGroup_release failed with status: {}", reply);
- }
-}
-
-Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupClient::activate(
- const hailo_activate_network_group_params_t &network_group_params)
-{
- (void)network_group_params;
- LOGGER__ERROR("ConfiguredNetworkGroup::activate function is not supported when using multi-process service, please use HailoRT Scheduler.");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-/* Network group base functions */
-Expected<LatencyMeasurementResult> ConfiguredNetworkGroupClient::get_latency_measurement(const std::string &network_name)
-{
- return m_client->ConfiguredNetworkGroup_get_latency_measurement(m_handle, network_name);
-}
-
-const std::string &ConfiguredNetworkGroupClient::get_network_group_name() const
-{
- return m_network_group_name;
-}
-
-const std::string &ConfiguredNetworkGroupClient::name() const
-{
- return m_network_group_name;
-}
-
-Expected<hailo_stream_interface_t> ConfiguredNetworkGroupClient::get_default_streams_interface()
-{
- return m_client->ConfiguredNetworkGroup_get_default_stream_interface(m_handle);
-}
-
-std::vector<std::reference_wrapper<InputStream>> ConfiguredNetworkGroupClient::get_input_streams_by_interface(hailo_stream_interface_t)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams_by_interface function is not supported when using multi-process service");
- std::vector<std::reference_wrapper<InputStream>> empty_vec;
- return empty_vec;
-}
-
-std::vector<std::reference_wrapper<OutputStream>> ConfiguredNetworkGroupClient::get_output_streams_by_interface(hailo_stream_interface_t)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_by_interface function is not supported when using multi-process service");
- std::vector<std::reference_wrapper<OutputStream>> empty_vec;
- return empty_vec;
-}
-
-ExpectedRef<InputStream> ConfiguredNetworkGroupClient::get_input_stream_by_name(const std::string&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_input_stream_by_name function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-ExpectedRef<OutputStream> ConfiguredNetworkGroupClient::get_output_stream_by_name(const std::string&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_output_stream_by_name function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-Expected<InputStreamRefVector> ConfiguredNetworkGroupClient::get_input_streams_by_network(const std::string&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams_by_network function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-Expected<OutputStreamRefVector> ConfiguredNetworkGroupClient::get_output_streams_by_network(const std::string&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_by_network function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-InputStreamRefVector ConfiguredNetworkGroupClient::get_input_streams()
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams function is not supported when using multi-process service");
- InputStreamRefVector empty_vec;
- return empty_vec;
-}
-
-OutputStreamRefVector ConfiguredNetworkGroupClient::get_output_streams()
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams function is not supported when using multi-process service");
- OutputStreamRefVector empty_vec;
- return empty_vec;
-}
-
-Expected<OutputStreamWithParamsVector> ConfiguredNetworkGroupClient::get_output_streams_from_vstream_names(const std::map<std::string, hailo_vstream_params_t>&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_from_vstream_names function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-hailo_status ConfiguredNetworkGroupClient::wait_for_activation(const std::chrono::milliseconds&)
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::wait_for_activation function is not supported when using multi-process service");
- return HAILO_NOT_IMPLEMENTED;
-}
-
-Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupClient::get_output_vstream_groups()
-{
- return m_client->ConfiguredNetworkGroup_get_output_vstream_groups(m_handle);
-}
-
-Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroupClient::make_output_vstream_params_groups(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
-{
- return m_client->ConfiguredNetworkGroup_make_output_vstream_params_groups(m_handle,
- quantized, format_type, timeout_ms, queue_size);
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupClient::make_input_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- return m_client->ConfiguredNetworkGroup_make_input_vstream_params(m_handle,
- quantized, format_type, timeout_ms, queue_size, network_name);
-}
-
-Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupClient::make_output_vstream_params(
- bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
- const std::string &network_name)
-{
- return m_client->ConfiguredNetworkGroup_make_output_vstream_params(m_handle,
- quantized, format_type, timeout_ms, queue_size, network_name);
-}
-
-Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroupClient::get_all_stream_infos(const std::string &network_name) const
-{
- return m_client->ConfiguredNetworkGroup_get_all_stream_infos(m_handle, network_name);
-}
-
-Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroupClient::get_network_infos() const
-{
- return m_client->ConfiguredNetworkGroup_get_network_infos(m_handle);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_input_vstream_infos(
- const std::string &network_name) const
-{
- return m_client->ConfiguredNetworkGroup_get_input_vstream_infos(m_handle, network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_output_vstream_infos(
- const std::string &network_name) const
-{
- return m_client->ConfiguredNetworkGroup_get_output_vstream_infos(m_handle, network_name);
-}
-
-Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_all_vstream_infos(
- const std::string &network_name) const
-{
- return m_client->ConfiguredNetworkGroup_get_all_vstream_infos(m_handle, network_name);
-}
-
-hailo_status ConfiguredNetworkGroupClient::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
-{
- return m_client->ConfiguredNetworkGroup_set_scheduler_timeout(m_handle, timeout, network_name);
-}
-
-hailo_status ConfiguredNetworkGroupClient::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
-{
- return m_client->ConfiguredNetworkGroup_set_scheduler_threshold(m_handle, threshold, network_name);
-}
-
-AccumulatorPtr ConfiguredNetworkGroupClient::get_activation_time_accumulator() const
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_activation_time_accumulator function is not supported when using multi-process service");
- return AccumulatorPtr();
-}
-
-AccumulatorPtr ConfiguredNetworkGroupClient::get_deactivation_time_accumulator() const
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_deactivation_time_accumulator function is not supported when using multi-process service");
- return AccumulatorPtr();
-}
-
-bool ConfiguredNetworkGroupClient::is_multi_context() const
-{
- auto reply = m_client->ConfiguredNetworkGroup_is_multi_context(m_handle);
- if (reply.status() != HAILO_SUCCESS) {
- LOGGER__ERROR("is_multi_context failed with status {}", reply.status());
- return false;
- }
- return reply.value();
-}
-
-const ConfigureNetworkParams ConfiguredNetworkGroupClient::get_config_params() const
-{
- auto reply = m_client->ConfiguredNetworkGroup_get_config_params(m_handle);
- if (reply.status() != HAILO_SUCCESS) {
- LOGGER__ERROR("get_config_params failed with status {}", reply.status());
- return ConfigureNetworkParams();
- }
- return reply.value();
-}
-
-Expected<std::vector<InputVStream>> ConfiguredNetworkGroupClient::create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params)
-{
- auto reply = m_client->InputVStreams_create(m_handle, inputs_params, getpid());
- CHECK_EXPECTED(reply);
- auto input_vstreams_handles = reply.release();
- std::vector<InputVStream> vstreams;
- vstreams.reserve(input_vstreams_handles.size());
-
- for (uint32_t handle : input_vstreams_handles) {
- auto vstream_client = InputVStreamClient::create(handle);
- CHECK_EXPECTED(vstream_client);
- auto vstream = VStreamsBuilderUtils::create_input(vstream_client.release());
- vstreams.push_back(std::move(vstream));
- }
- return vstreams;
-}
-
-Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupClient::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
-{
- auto reply = m_client->OutputVStreams_create(m_handle, outputs_params, getpid());
- CHECK_EXPECTED(reply);
- auto output_vstreams_handles = reply.release();
- std::vector<OutputVStream> vstreams;
- vstreams.reserve(output_vstreams_handles.size());
-
- for(uint32_t handle : output_vstreams_handles) {
- auto vstream_client = OutputVStreamClient::create(handle);
- CHECK_EXPECTED(vstream_client);
- auto vstream = VStreamsBuilderUtils::create_output(vstream_client.release());
- vstreams.push_back(std::move(vstream));
- }
- return vstreams;
-}
-
-} /* namespace hailort */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file network_group_metadata.cpp
- * @brief Contains all relevant information about a network group from the hef.
- **/
-
-#include "network_group_metadata.hpp"
-
-namespace hailort
-{
-
-static void get_demuxes_names_impl(const LayerInfo &info, std::vector<std::string> &res)
-{
- if (!info.is_mux) {
- res.push_back(info.name);
- } else {
- for (auto &pred : info.predecessor) {
- get_demuxes_names_impl(pred, res);
- }
- }
-}
-
-static std::vector<std::string> get_demuxes_names(const LayerInfo &info)
-{
- std::vector<std::string> res;
- get_demuxes_names_impl(info, res);
- return res;
-}
-
-static bool is_edge_under_mux(const LayerInfo &info, const std::string &edge_name)
-{
- if (!info.is_mux) {
- return edge_name == info.name;
- }
- for (const auto &pred : info.predecessor) {
- if (info.is_mux) {
- if (is_edge_under_mux(pred, edge_name)) {
- return true;
- }
- } else {
- if (edge_name == pred.name) {
- return true;
- }
- }
- }
- return false;
-}
-
-
-PreliminaryContextMetadata::PreliminaryContextMetadata(std::vector<ContextSwitchOperation> &&operations,
- ConfigBufferInfoMap&& config_buffers_info) :
- m_operations(std::move(operations)),
- m_config_buffers_info(std::move(config_buffers_info))
-{}
-
-const std::vector<ContextSwitchOperation> &PreliminaryContextMetadata::get_operations() const
-{
- return m_operations;
-}
-
-const ConfigBufferInfoMap &PreliminaryContextMetadata::config_buffers_info() const
-{
- return m_config_buffers_info;
-}
-
-ContextMetadata::ContextMetadata(std::vector<ContextSwitchOperation> &&operations,
- ConfigBufferInfoMap&& config_buffers_info) :
- m_operations(std::move(operations)),
- m_config_buffers_info(std::move(config_buffers_info))
-{}
-
-const std::vector<ContextSwitchOperation> &ContextMetadata::get_operations() const
-{
- return m_operations;
-}
-
-const ConfigBufferInfoMap &ContextMetadata::config_buffers_info() const
-{
- return m_config_buffers_info;
-}
-
-void ContextMetadata::add_boundary_layer(const LayerInfo &layer_info)
-{
- if (HAILO_H2D_STREAM == layer_info.direction) {
- m_boundary_input_layers.push_back(layer_info);
- } else {
- m_boundary_output_layers.push_back(layer_info);
- }
-}
-
-void ContextMetadata::add_inter_context_layer(const LayerInfo &layer_info)
-{
- if (HAILO_H2D_STREAM == layer_info.direction) {
- m_inter_context_input_layers.push_back(layer_info);
- } else {
- m_inter_context_output_layers.push_back(layer_info);
- }
-}
-
-void ContextMetadata::add_ddr_layer(const LayerInfo &layer_info)
-{
- if (HAILO_H2D_STREAM == layer_info.direction) {
- m_ddr_input_layers.push_back(layer_info);
- } else {
- m_ddr_output_layers.push_back(layer_info);
- }
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_boundary_input_layers() const
-{
- return m_boundary_input_layers;
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_boundary_output_layers() const
-{
- return m_boundary_output_layers;
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_inter_context_input_layers() const
-{
- return m_inter_context_input_layers;
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_inter_context_output_layers() const
-{
- return m_inter_context_output_layers;
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_ddr_input_layers() const
-{
- return m_ddr_input_layers;
-}
-
-const std::vector<LayerInfo> &ContextMetadata::get_ddr_output_layers() const
-{
- return m_ddr_output_layers;
-}
-
-NetworkGroupMetadata::NetworkGroupMetadata(const std::string &network_group_name,
- PreliminaryContextMetadata &&preliminary_context,
- std::vector<ContextMetadata> &&dynamic_contexts,
- std::vector<ConfigChannelInfo> &&config_channels_info,
- std::vector<std::string> &&sorted_output_names,
- SupportedFeatures &supported_features, const std::vector<std::string> &sorted_network_names)
- : m_preliminary_context(std::move(preliminary_context)),
- m_dynamic_contexts(std::move(dynamic_contexts)),
- m_config_channels_info(std::move(config_channels_info)),
- m_network_group_name(network_group_name), m_sorted_output_names(std::move(sorted_output_names)),
- m_supported_features(supported_features), m_sorted_network_names(sorted_network_names) {}
-
-Expected<LayerInfo> NetworkGroupMetadata::get_layer_info_by_stream_name(const std::string &stream_name) const
-{
- for (auto layer_info : get_all_layer_infos()) {
- if (layer_info.name == stream_name) {
- return layer_info;
- }
- }
- LOGGER__ERROR("Failed to find layer with name {}", stream_name);
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-std::vector<LayerInfo> NetworkGroupMetadata::get_input_layer_infos() const
-{
- std::vector<LayerInfo> res;
- // Edge layers exists only in the dynamic context.
- for (const auto &context : m_dynamic_contexts) {
- for (const auto &layer_info : context.get_boundary_input_layers()) {
- res.emplace_back(layer_info);
- }
- }
- return res;
-}
-
-std::vector<LayerInfo> NetworkGroupMetadata::get_output_layer_infos() const
-{
- std::vector<LayerInfo> res;
- // Edge layers exists only in the dynamic context.
- for (const auto &context : m_dynamic_contexts) {
- for (const auto &layer_info : context.get_boundary_output_layers()) {
- res.emplace_back(layer_info);
- }
- }
- return res;
-}
-
-std::vector<LayerInfo> NetworkGroupMetadata::get_all_layer_infos() const
-{
- const auto input_layer_infos = get_input_layer_infos();
- const auto output_layer_infos = get_output_layer_infos();
-
- std::vector<LayerInfo> res;
- res.reserve(input_layer_infos.size() + output_layer_infos.size());
- res.insert(res.end(), input_layer_infos.begin(), input_layer_infos.end());
- res.insert(res.end(), output_layer_infos.begin(), output_layer_infos.end());
-
- return res;
-}
-
-Expected<std::vector<LayerInfo>> NetworkGroupMetadata::get_input_layer_infos(const std::string &network_name) const
-{
- std::vector<LayerInfo> res;
- // Edge layers exists only in the dynamic context.
- for (const auto &context : m_dynamic_contexts) {
- for (const auto &layer_info : context.get_boundary_input_layers()) {
- if ((layer_info.network_name == network_name) || (network_name.empty()) || (network_name == default_network_name())) {
- res.emplace_back(layer_info);
- }
- }
- }
- CHECK_AS_EXPECTED(res.size() > 0, HAILO_NOT_FOUND, "Network name {} is not found in networks metadata", network_name);
- return res;
-}
-
-Expected<std::vector<LayerInfo>> NetworkGroupMetadata::get_output_layer_infos(const std::string &network_name) const
-{
- std::vector<LayerInfo> res;
- // Edge layers exists only in the dynamic context.
- for (const auto &context : m_dynamic_contexts) {
- for (auto &layer_info : context.get_boundary_output_layers()) {
- if ((layer_info.network_name == network_name) || (network_name.empty()) || (network_name == default_network_name())) {
- res.emplace_back(layer_info);
- }
- }
- }
- CHECK_AS_EXPECTED(res.size() > 0, HAILO_NOT_FOUND, "Network name {} is not found in networks metadata", network_name);
- return res;
-}
-
-const PreliminaryContextMetadata &NetworkGroupMetadata::preliminary_context() const
-{
- return m_preliminary_context;
-}
-
-const std::vector<ContextMetadata> &NetworkGroupMetadata::dynamic_contexts() const
-{
- return m_dynamic_contexts;
-}
-
-const std::vector<ConfigChannelInfo> &NetworkGroupMetadata::config_channels_info() const
-{
- return m_config_channels_info;
-}
-
-Expected<std::vector<LayerInfo>> NetworkGroupMetadata::get_all_layer_infos(const std::string &network_name) const
-{
- auto input_layer_infos = get_input_layer_infos(network_name);
- CHECK_EXPECTED(input_layer_infos);
-
- auto output_layer_infos = get_output_layer_infos(network_name);
- CHECK_EXPECTED(output_layer_infos);
-
- std::vector<LayerInfo> res;
- res.reserve(input_layer_infos->size() + output_layer_infos->size());
- res.insert(res.end(), input_layer_infos->begin(), input_layer_infos->end());
- res.insert(res.end(), output_layer_infos->begin(), output_layer_infos->end());
-
- return res;
-}
-
-Expected<std::vector<hailo_stream_info_t>> NetworkGroupMetadata::get_input_stream_infos(const std::string &network_name) const
-{
- auto input_layer_infos = get_input_layer_infos(network_name);
- CHECK_EXPECTED(input_layer_infos);
-
- return convert_layer_infos_to_stream_infos(input_layer_infos.value());
-}
-
-Expected<std::vector<hailo_stream_info_t>> NetworkGroupMetadata::get_output_stream_infos(const std::string &network_name) const
-{
- auto output_layer_infos = get_output_layer_infos(network_name);
- CHECK_EXPECTED(output_layer_infos);
-
- return convert_layer_infos_to_stream_infos(output_layer_infos.value());
-}
-
-Expected<std::vector<hailo_stream_info_t>> NetworkGroupMetadata::get_all_stream_infos(const std::string &network_name) const
-{
- auto input_stream_infos = get_input_stream_infos(network_name);
- CHECK_EXPECTED(input_stream_infos);
-
- auto output_stream_infos = get_output_stream_infos(network_name);
- CHECK_EXPECTED(output_stream_infos);
-
- std::vector<hailo_stream_info_t> res;
- res.reserve(input_stream_infos->size() + output_stream_infos->size());
- res.insert(res.end(), input_stream_infos->begin(), input_stream_infos->end());
- res.insert(res.end(), output_stream_infos->begin(), output_stream_infos->end());
-
- return res;
-}
-
-Expected<std::vector<hailo_vstream_info_t>> NetworkGroupMetadata::get_input_vstream_infos(const std::string &network_name) const
-{
- auto input_layer_infos = get_input_layer_infos(network_name);
- CHECK_EXPECTED(input_layer_infos);
-
- return convert_layer_infos_to_vstream_infos(input_layer_infos.value());
-}
-
-Expected<std::vector<hailo_vstream_info_t>> NetworkGroupMetadata::get_output_vstream_infos(const std::string &network_name) const
-{
- std::vector<hailo_vstream_info_t> res;
- if (m_supported_features.hailo_net_flow) {
- res = m_output_vstreams_infos;
- return res;
- }
- auto expected_output_layer_infos = get_output_layer_infos(network_name);
- CHECK_EXPECTED(expected_output_layer_infos);
- auto output_layer_infos = expected_output_layer_infos.release();
-
- res = convert_layer_infos_to_vstream_infos(output_layer_infos);
-
- hailo_status status = HAILO_SUCCESS;
- std::sort(res.begin(), res.end(),
- [this, &status](const auto &info1, const auto &info2)
- {
- const auto index1 = std::find(m_sorted_output_names.begin(), m_sorted_output_names.end(), std::string(info1.name));
- const auto index2 = std::find(m_sorted_output_names.begin(), m_sorted_output_names.end(), std::string(info2.name));
-
- if (m_sorted_output_names.end() == index1) {
- LOGGER__ERROR("Stream {} not found in sorted output names", info1.name);
- status = HAILO_INTERNAL_FAILURE;
- return false;
- }
-
- if (m_sorted_output_names.end() == index2) {
- LOGGER__ERROR("Stream {} not found in sorted output names", info2.name);
- status = HAILO_INTERNAL_FAILURE;
- return false;
- }
-
- return index1 < index2;
- });
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return res;
-}
-
-Expected<std::vector<hailo_vstream_info_t>> NetworkGroupMetadata::get_all_vstream_infos(const std::string &network_name) const
-{
- auto input_vstream_infos = get_input_vstream_infos(network_name);
- CHECK_EXPECTED(input_vstream_infos);
-
- auto output_vstream_infos = get_output_vstream_infos(network_name);
- CHECK_EXPECTED(output_vstream_infos);
-
- std::vector<hailo_vstream_info_t> res;
- res.reserve(input_vstream_infos->size() + output_vstream_infos->size());
- res.insert(res.end(), input_vstream_infos->begin(), input_vstream_infos->end());
- res.insert(res.end(), output_vstream_infos->begin(), output_vstream_infos->end());
-
- return res;
-}
-
-Expected<std::vector<std::string>> NetworkGroupMetadata::get_vstream_names_from_stream_name(const std::string &stream_name) const
-{
- std::vector<std::string> results;
- for (auto &layer_info : get_all_layer_infos()) {
- if (stream_name == layer_info.name) {
- if (layer_info.is_defused_nms) {
- return std::vector<std::string> (1, layer_info.fused_nms_layer[0].name);
- } else if (layer_info.is_mux) {
- return get_demuxes_names(layer_info);
- } else {
- return std::vector<std::string> (1, layer_info.name);
- }
- }
- }
- return make_unexpected(HAILO_NOT_FOUND);
-}
-
-Expected<std::vector<std::string>> NetworkGroupMetadata::get_stream_names_from_vstream_name(const std::string &vstream_name) const
-{
- std::vector<std::string> results;
- for (auto &layer_info : get_all_layer_infos()) {
- if (layer_info.is_mux) {
- if (is_edge_under_mux(layer_info, vstream_name)) {
- // vstream_name is a demux of the layer info
- results.push_back(layer_info.name);
- }
- } else if (layer_info.is_defused_nms) {
- if (vstream_name == layer_info.fused_nms_layer[0].name) {
- // vstream_name is the fused-layer of the layer info
- results.push_back(layer_info.name);
- }
- } else if (m_supported_features.hailo_net_flow && layer_info.direction == HAILO_D2H_STREAM) {
- results.push_back(layer_info.name);
- } else if (vstream_name == layer_info.name) {
- // vstream_name is a regular stream
- results.push_back(layer_info.name);
- }
- }
- CHECK_AS_EXPECTED(0 < results.size(), HAILO_NOT_FOUND, "Did not found vstream {}", vstream_name);
- return results;
-}
-
-std::vector<hailo_stream_info_t> NetworkGroupMetadata::convert_layer_infos_to_stream_infos(const std::vector<LayerInfo> &layer_infos) const
-{
- std::vector<hailo_stream_info_t> res;
- for (auto &layer_info : layer_infos) {
- res.push_back(LayerInfoUtils::get_stream_info_from_layer_info(layer_info));
- }
- return res;
-}
-
-std::vector<hailo_vstream_info_t> NetworkGroupMetadata::convert_layer_infos_to_vstream_infos(const std::vector<LayerInfo> &layer_infos) const
-{
- std::vector<hailo_vstream_info_t> res;
- for (auto &layer_info : layer_infos) {
- auto vstream_infos = LayerInfoUtils::get_vstream_infos_from_layer_info(layer_info);
- for (const auto &vstream_info : vstream_infos) {
- // In case of fused nms layers, several LayerInfos will contain data about the same fused layer
- if (!LayerInfoUtils::vstream_info_already_in_vector(res, vstream_info.name)) {
- res.push_back(vstream_info);
- }
- }
- }
- return res;
-}
-
-Expected<std::vector<hailo_network_info_t>> NetworkGroupMetadata::get_network_infos() const
-{
- std::vector<hailo_network_info_t> network_infos;
- auto net_group_name = network_group_name();
- network_infos.reserve(m_sorted_network_names.size());
- for (auto const &network_name : m_sorted_network_names) {
- hailo_network_info_t network_info = {};
- CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_NAME_SIZE >= (network_name.length() + 1), HAILO_INTERNAL_FAILURE,
- "The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", network_name);
- memcpy(network_info.name, network_name.c_str(), network_name.length() + 1);
-
- network_infos.push_back(network_info);
- }
-
- return network_infos;
-}
-
-
-Expected<NetworkGroupMetadata> NetworkGroupMetadataPerArch::get_metadata(uint32_t partial_clusters_layout_bitmap)
-{
- if (PARTIAL_CLUSTERS_LAYOUT_IGNORE == partial_clusters_layout_bitmap) {
- // Passing PARTIAL_CLUSTERS_LAYOUT_IGNORE is magic for getting one of the metadata
- assert(0 != m_metadata_per_arch.size());
- auto result = m_metadata_per_arch.begin()->second;
- return result;
- }
- if (contains(m_metadata_per_arch, partial_clusters_layout_bitmap)) {
- auto result = m_metadata_per_arch[partial_clusters_layout_bitmap];
- return result;
- }
- LOGGER__ERROR("NetworkGroupMetadataPerArch does not contain metadata for partial_clusters_layout_bitmap {}", partial_clusters_layout_bitmap);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-void NetworkGroupMetadataPerArch::add_metadata(const NetworkGroupMetadata &metadata, uint32_t partial_clusters_layout_bitmap)
-{
- m_metadata_per_arch[partial_clusters_layout_bitmap] = metadata;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file network_group_metadata.hpp
- * @brief Contains all relevant information about a network group from the hef.
- **/
-
-#ifndef _HAILO_NETWORK_GROUP_METADATA_HPP_
-#define _HAILO_NETWORK_GROUP_METADATA_HPP_
-
-#include "layer_info.hpp"
-#include "context_switch/context_switch_actions.hpp"
-
-namespace hailort
-{
-
-constexpr const uint32_t PARTIAL_CLUSTERS_LAYOUT_IGNORE = static_cast<uint32_t>(-1);
-
-struct SupportedFeatures {
- bool padded_ddr_buffers = false;
- bool multi_network_support = false;
- bool multi_context = false;
- bool preliminary_run_asap = false;
- bool hailo_net_flow = false;
-};
-
-// For each config_stream_index we store vector of all ccw write length. The vector is used to build the config buffer.g
-using ConfigBufferInfoMap = std::unordered_map<uint8_t, std::vector<uint32_t>>;
-
-class PreliminaryContextMetadata final {
-public:
- PreliminaryContextMetadata() = default; // TODO HRT-8478: remove
- PreliminaryContextMetadata(std::vector<ContextSwitchOperation> &&operations,
- ConfigBufferInfoMap&& config_buffers_info);
- const std::vector<ContextSwitchOperation> &get_operations() const;
- const ConfigBufferInfoMap &config_buffers_info() const;
-
-private:
- std::vector<ContextSwitchOperation> m_operations;
- ConfigBufferInfoMap m_config_buffers_info;
-};
-
-class ContextMetadata final {
-public:
- explicit ContextMetadata(std::vector<ContextSwitchOperation> &&operations,
- ConfigBufferInfoMap&& config_buffers_info);
-
- const std::vector<ContextSwitchOperation> &get_operations() const;
- const ConfigBufferInfoMap &config_buffers_info() const;
-
- void add_boundary_layer(const LayerInfo &layer_info);
- void add_inter_context_layer(const LayerInfo &layer_info);
- void add_ddr_layer(const LayerInfo &layer_info);
-
- const std::vector<LayerInfo> &get_boundary_input_layers() const;
- const std::vector<LayerInfo> &get_boundary_output_layers() const;
- const std::vector<LayerInfo> &get_inter_context_input_layers() const;
- const std::vector<LayerInfo> &get_inter_context_output_layers() const;
- const std::vector<LayerInfo> &get_ddr_input_layers() const;
- const std::vector<LayerInfo> &get_ddr_output_layers() const;
-
-private:
- std::vector<ContextSwitchOperation> m_operations;
- ConfigBufferInfoMap m_config_buffers_info;
-
- std::vector<LayerInfo> m_boundary_input_layers;
- std::vector<LayerInfo> m_boundary_output_layers;
- std::vector<LayerInfo> m_inter_context_input_layers;
- std::vector<LayerInfo> m_inter_context_output_layers;
- std::vector<LayerInfo> m_ddr_input_layers;
- std::vector<LayerInfo> m_ddr_output_layers;
-};
-
-struct ConfigChannelInfo {
- uint8_t engine_index;
-};
-
-class NetworkGroupMetadata final {
-public:
- NetworkGroupMetadata() = default; // TODO HRT-8478: remove
- NetworkGroupMetadata(const std::string &network_group_name,
- PreliminaryContextMetadata &&preliminary_context,
- std::vector<ContextMetadata> &&dynamic_contexts,
- std::vector<ConfigChannelInfo> &&config_channels_info,
- std::vector<std::string> &&sorted_output_names,
- SupportedFeatures &supported_features,
- const std::vector<std::string> &sorted_network_names);
-
- std::vector<LayerInfo> get_input_layer_infos() const;
- std::vector<LayerInfo> get_output_layer_infos() const;
- std::vector<LayerInfo> get_all_layer_infos() const;
-
- Expected<std::vector<LayerInfo>> get_input_layer_infos(const std::string &network_name) const;
- Expected<std::vector<LayerInfo>> get_output_layer_infos(const std::string &network_name) const;
- Expected<std::vector<LayerInfo>> get_all_layer_infos(const std::string &network_name) const;
- Expected<LayerInfo> get_layer_info_by_stream_name(const std::string &stream_name) const;
-
- const PreliminaryContextMetadata &preliminary_context() const;
- const std::vector<ContextMetadata> &dynamic_contexts() const;
-
- const std::vector<ConfigChannelInfo> &config_channels_info() const;
-
- Expected<std::vector<hailo_stream_info_t>> get_input_stream_infos(const std::string &network_name = "") const;
- Expected<std::vector<hailo_stream_info_t>> get_output_stream_infos(const std::string &network_name = "") const;
- Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name = "") const;
-
- Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name = "") const;
- Expected<std::vector<hailo_vstream_info_t>> get_output_vstream_infos(const std::string &network_name = "") const;
- Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name = "") const;
-
- Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name) const;
- Expected<std::vector<std::string>> get_stream_names_from_vstream_name(const std::string &vstream_name) const;
-
- Expected<std::vector<hailo_network_info_t>> get_network_infos() const;
-
- const std::string &network_group_name() const
- {
- return m_network_group_name;
- }
-
- const std::string default_network_name() const
- {
- return HailoRTDefaults::get_network_name(m_network_group_name);
- }
-
- const std::vector<std::string> get_sorted_output_names() const
- {
- return m_sorted_output_names;
- }
-
- const SupportedFeatures &supported_features() const
- {
- return m_supported_features;
- }
-
- const std::vector<std::string> &get_network_names() const
- {
- return m_sorted_network_names;
- }
-
- void add_output_vstream_info(const hailo_vstream_info_t &output_vstream_info) {
- m_output_vstreams_infos.push_back(output_vstream_info);
- }
-
-private:
- std::vector<hailo_stream_info_t> convert_layer_infos_to_stream_infos(const std::vector<LayerInfo> &layer_infos) const;
- std::vector<hailo_vstream_info_t> convert_layer_infos_to_vstream_infos(const std::vector<LayerInfo> &layer_infos) const;
-
- PreliminaryContextMetadata m_preliminary_context;
- std::vector<ContextMetadata> m_dynamic_contexts;
- std::vector<ConfigChannelInfo> m_config_channels_info;
- std::string m_network_group_name;
- std::vector<std::string> m_sorted_output_names;
- SupportedFeatures m_supported_features;
- std::vector<std::string> m_sorted_network_names;
- // TODO: remove this from here! NetworkGroupMetadata should be CoreOpMetadata and contain no net_flow information! (HRT-8639)
- // To add insult to injury, this is being constructed lazyly by add_output_layer_info
- std::vector<hailo_vstream_info_t> m_output_vstreams_infos; // Valid only in case of post process
-};
-
-
-class NetworkGroupMetadataPerArch final
-{
-public:
- NetworkGroupMetadataPerArch() = default;
-
- Expected<NetworkGroupMetadata> get_metadata(uint32_t partial_clusters_layout_bitmap);
- void add_metadata(const NetworkGroupMetadata &metadata, uint32_t partial_clusters_layout_bitmap);
-
-private:
- std::map<uint32_t, NetworkGroupMetadata> m_metadata_per_arch;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_NETWORK_GROUP_METADATA_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_scheduler.cpp
- * @brief: Network scheduler
- **/
-
-#include "network_group_scheduler.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/vdevice_network_group.hpp"
-#include "hef_internal.hpp"
-#include "vdevice_stream_multiplexer_wrapper.hpp"
-#include "tracer_macros.hpp"
-#include "scheduler_oracle.hpp"
-
-#include <fstream>
-
-namespace hailort
-{
-
-#define SINGLE_CONTEXT_BATCH_SIZE (1)
-
-// TODO: use device handles instead device count
-NetworkGroupScheduler::NetworkGroupScheduler(hailo_scheduling_algorithm_t algorithm, uint32_t device_count) :
- m_changing_current_batch_size(),
- m_should_ng_stop(),
- m_algorithm(algorithm),
- m_before_read_write_mutex(),
- m_write_read_cv(),
- m_should_monitor(false)
-#if defined(__GNUC__)
- , m_mon_tmp_output()
-#endif
-{
- for (uint32_t i = 0; i < device_count; i++) {
- m_devices.push_back(make_shared_nothrow<ActiveDeviceInfo>(i));
- }
-
- // TODO: HRT-7391 - Change scheduler monitor to work only when MON command is active
- m_should_monitor = SchedulerMon::should_monitor();
- if (m_should_monitor) {
- auto status = start_mon();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to initiate hailo monitor of networks, with status {}", status);
- }
- }
-}
-
-NetworkGroupScheduler::~NetworkGroupScheduler()
-{
- for (auto device_info : m_devices) {
- if (INVALID_NETWORK_GROUP_HANDLE != device_info->current_network_group_handle) {
- auto current_ng = m_cngs[device_info->current_network_group_handle]->get_network_group();
- auto current_network_group_bundle = std::dynamic_pointer_cast<VDeviceNetworkGroup>(current_ng);
- assert(nullptr != current_network_group_bundle);
- auto vdma_network_group = current_network_group_bundle->get_network_group_by_device_index(device_info->device_id);
- if (!vdma_network_group) {
- LOGGER__ERROR("Error retrieving network group in scheduler destructor");
- } else {
- if (HAILO_SUCCESS != VdmaConfigManager::switch_network_group(vdma_network_group.value(), nullptr, 0)) {
- LOGGER__ERROR("Error deactivating network group when destroying scheduler");
- }
- }
- }
- }
-
- if (m_should_monitor) {
- m_should_monitor = false;
- m_mon_shutdown_event->signal();
- if (m_mon_thread.joinable()) {
- m_mon_thread.join();
- }
- }
-}
-
-Expected<NetworkGroupSchedulerPtr> NetworkGroupScheduler::create_round_robin(uint32_t device_count)
-{
- auto ptr = make_shared_nothrow<NetworkGroupScheduler>(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN, device_count);
- CHECK_AS_EXPECTED(nullptr != ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return ptr;
-}
-
-std::string get_curr_pid_as_str()
-{
-#ifdef _WIN32
- auto pid = GetCurrentProcessId();
-#else
- auto pid = getpid();
-#endif
- return std::to_string(pid);
-}
-
-hailo_status NetworkGroupScheduler::start_mon()
-{
-#if defined(__GNUC__)
- m_last_measured_timestamp = std::chrono::steady_clock::now();
- m_mon_shutdown_event = Event::create_shared(Event::State::not_signalled);
- CHECK(nullptr != m_mon_shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
-
- auto tmp_file = open_temp_mon_file();
- CHECK_EXPECTED_AS_STATUS(tmp_file);
- m_mon_tmp_output = tmp_file.release();
-
- m_mon_thread = std::thread([this] ()
- {
- while (m_should_monitor) {
- auto status = m_mon_shutdown_event->wait(DEFAULT_SCHEDULER_MON_INTERVAL);
- if (HAILO_TIMEOUT == status) {
- dump_state();
- } else if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Scheduler monitor failed with status {}", status);
- return;
- }
- }
- return;
- });
-
- return HAILO_SUCCESS;
-#else
- return HAILO_NOT_IMPLEMENTED;
-#endif
-}
-
-#if defined(__GNUC__)
-Expected<std::shared_ptr<TempFile>> NetworkGroupScheduler::open_temp_mon_file()
-{
- std::string file_name = get_curr_pid_as_str();
- auto tmp_file = TempFile::create(file_name, SCHEDULER_MON_TMP_DIR);
- CHECK_EXPECTED(tmp_file);
-
- auto tmp_file_ptr = make_shared_nothrow<TempFile>(tmp_file.release());
- CHECK_AS_EXPECTED(nullptr != tmp_file_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return tmp_file_ptr;
-}
-
-void NetworkGroupScheduler::dump_state()
-{
- auto file = LockedFile::create(m_mon_tmp_output->name(), "w");
- if (HAILO_SUCCESS != file.status()) {
- LOGGER__ERROR("Failed to open and lock file {}, with status: {}", m_mon_tmp_output->name(), file.status());
- return;
- }
-
- ProtoMon mon;
- mon.set_pid(get_curr_pid_as_str());
- log_monitor_networks_infos(mon);
- log_monitor_frames_infos(mon);
-
- // Clear accumulators
- for (auto &handle_duration_pair : m_active_duration) {
- handle_duration_pair.second = 0;
- }
- for (auto &handle_fps_pair : m_fps_accumulator) {
- handle_fps_pair.second = 0;
- }
-
- if (!mon.SerializeToFileDescriptor(file->get_fd())) {
- LOGGER__ERROR("Failed to SerializeToFileDescriptor(), with errno: {}", errno);
- }
-}
-#endif
-
-std::string NetworkGroupScheduler::get_network_group_name(const scheduler_ng_handle_t &network_group_handle)
-{
- return m_cngs[network_group_handle]->get_network_group_name();
-}
-
-// TODO: HRT-7392 - Reduce core percentage when scheduler is idle
-void NetworkGroupScheduler::log_monitor_networks_infos(ProtoMon &mon)
-{
- auto curr_time = std::chrono::steady_clock::now();
- const auto measurement_duration = std::chrono::duration_cast<std::chrono::duration<double>>(curr_time - m_last_measured_timestamp).count();
-
- for (uint32_t network_group_handle = 0; network_group_handle < m_last_measured_activation_timestamp.size(); network_group_handle++) {
- assert(contains(m_active_duration, network_group_handle));
- auto curr_ng_active_time = m_active_duration[network_group_handle];
-
- for (auto device_info : m_devices) {
- if (network_group_handle == device_info->current_network_group_handle) {
- // Network is currently active
- auto time_diff = std::chrono::duration_cast<std::chrono::duration<double>>(
- curr_time - m_last_measured_activation_timestamp[device_info->current_network_group_handle]).count();
- curr_ng_active_time += time_diff;
- m_last_measured_activation_timestamp[device_info->current_network_group_handle] = curr_time;
- }
- }
-
- auto active_time = ((curr_ng_active_time * 100) / measurement_duration);
- auto outputs_count = static_cast<uint32_t>(m_cngs[network_group_handle]->get_outputs_names().size());
- auto fps = static_cast<double>((m_fps_accumulator[network_group_handle] / outputs_count) / measurement_duration);
-
- auto net_info = mon.add_networks_infos();
- net_info->set_network_name(get_network_group_name(network_group_handle));
- net_info->set_active_time(active_time);
- net_info->set_fps(fps);
- }
-
- m_last_measured_timestamp = curr_time;
-}
-
-void NetworkGroupScheduler::log_monitor_frames_infos(ProtoMon &mon)
-{
- for (uint32_t network_group_handle = 0; network_group_handle < m_cngs.size(); network_group_handle++) {
- auto net_frames_info = mon.add_net_frames_infos();
- net_frames_info->set_network_name(get_network_group_name(network_group_handle));
-
- for (auto &stream_name : m_cngs[network_group_handle]->get_inputs_names()) {
- auto stream_frames_info = net_frames_info->add_streams_frames_infos();
- stream_frames_info->set_stream_name(stream_name);
- stream_frames_info->set_stream_direction(PROTO__STREAM_DIRECTION__HOST_TO_DEVICE);
- auto status = set_h2d_frames_counters(network_group_handle, stream_name, *stream_frames_info);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to set stream's {} frames count, status = {}", stream_name, status);
- continue;
- }
- }
-
- for (auto &stream_name : m_cngs[network_group_handle]->get_outputs_names()) {
- auto stream_frames_info = net_frames_info->add_streams_frames_infos();
- stream_frames_info->set_stream_name(stream_name);
- stream_frames_info->set_stream_direction(PROTO__STREAM_DIRECTION__DEVICE_TO_HOST);
- auto status = set_d2h_frames_counters(network_group_handle, stream_name, *stream_frames_info);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to set stream's {} frames count, status = {}", stream_name, status);
- continue;
- }
- }
- }
-}
-
-hailo_status NetworkGroupScheduler::set_h2d_frames_counters(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- ProtoMonStreamFramesInfo &stream_frames_info)
-{
- assert(m_cngs.size() > network_group_handle);
- auto current_cng = m_cngs[network_group_handle]->get_network_group();
-
- auto input_stream = current_cng->get_input_stream_by_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(input_stream);
-
- InputStreamBase &vdevice_input = static_cast<InputStreamBase&>(input_stream->get());
- auto buffer_frames_size = vdevice_input.get_buffer_frames_size();
- if (HAILO_SUCCESS == buffer_frames_size.status()) {
- stream_frames_info.set_buffer_frames_size(static_cast<int32_t>(buffer_frames_size.value()));
- } else {
- stream_frames_info.set_buffer_frames_size(SCHEDULER_MON_NAN_VAL);
- }
-
- auto pending_frames_count = vdevice_input.get_pending_frames_count();
- if (HAILO_SUCCESS == pending_frames_count.status()) {
- stream_frames_info.set_pending_frames_count(static_cast<int32_t>(pending_frames_count.value()));
- } else {
- stream_frames_info.set_pending_frames_count(SCHEDULER_MON_NAN_VAL);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::set_d2h_frames_counters(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- ProtoMonStreamFramesInfo &stream_frames_info)
-{
- assert(m_cngs.size() > network_group_handle);
- auto current_cng = m_cngs[network_group_handle]->get_network_group();
-
- auto output_stream = current_cng->get_output_stream_by_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(output_stream);
-
- OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream->get());
- auto buffer_frames_size = vdevice_output.get_buffer_frames_size();
- if (HAILO_SUCCESS == buffer_frames_size.status()) {
- stream_frames_info.set_buffer_frames_size(static_cast<int32_t>(buffer_frames_size.value()));
- } else {
- stream_frames_info.set_buffer_frames_size(SCHEDULER_MON_NAN_VAL);
- }
-
- auto pending_frames_count = vdevice_output.get_pending_frames_count();
- if (HAILO_SUCCESS == pending_frames_count.status()) {
- stream_frames_info.set_pending_frames_count(static_cast<int32_t>(pending_frames_count.value()));
- } else {
- stream_frames_info.set_pending_frames_count(SCHEDULER_MON_NAN_VAL);
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<scheduler_ng_handle_t> NetworkGroupScheduler::add_network_group(std::shared_ptr<ConfiguredNetworkGroup> added_cng)
-{
- scheduler_ng_handle_t network_group_handle = INVALID_NETWORK_GROUP_HANDLE;
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- network_group_handle = static_cast<uint32_t>(m_cngs.size());
- TRACE(AddNetworkGroupTrace, "", added_cng->name(), DEFAULT_SCHEDULER_TIMEOUT.count(), DEFAULT_SCHEDULER_MIN_THRESHOLD, network_group_handle);
-
- auto stream_infos = added_cng->get_all_stream_infos();
- CHECK_EXPECTED(stream_infos);
-
- auto scheduled_ng = ScheduledNetworkGroup::create(added_cng, stream_infos.value());
- CHECK_EXPECTED(scheduled_ng);
-
- m_cngs.emplace_back(scheduled_ng.release());
-
- m_changing_current_batch_size[network_group_handle] = false;
-
- for (const auto &stream_info : stream_infos.value()) {
- m_should_ng_stop[network_group_handle][stream_info.name] = false;
- }
-
- for (auto& device_info : m_devices) {
- for (const auto &stream_info : stream_infos.value()) {
- if (HAILO_H2D_STREAM == stream_info.direction) {
- device_info->current_cycle_requested_transferred_frames_h2d[network_group_handle][stream_info.name] = 0;
- } else {
- device_info->current_cycle_finished_transferred_frames_d2h[network_group_handle][stream_info.name] = 0;
- device_info->current_cycle_finished_read_frames_d2h[network_group_handle][stream_info.name] = 0;
- }
- }
- }
-
- // Monitor members
- m_last_measured_activation_timestamp[network_group_handle] = {};
- m_active_duration[network_group_handle] = 0;
- m_fps_accumulator[network_group_handle] = 0;
- }
- m_write_read_cv.notify_all();
- return network_group_handle;
-}
-
-bool NetworkGroupScheduler::is_network_group_active(const scheduler_ng_handle_t &network_group_handle)
-{
- for (auto device_info : m_devices) {
- if (network_group_handle == device_info->current_network_group_handle) {
- return true;
- }
- }
-
- return false;
-}
-
-bool NetworkGroupScheduler::is_switching_current_network_group(const scheduler_ng_handle_t &network_group_handle)
-{
- for (auto device_info : m_devices) {
- if (network_group_handle == device_info->current_network_group_handle && device_info->is_switching_network_group) {
- return true;
- }
- }
-
- return false;
-}
-
-bool NetworkGroupScheduler::is_multi_device()
-{
- return m_devices.size() > 1;
-}
-
-hailo_status NetworkGroupScheduler::wait_for_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout, const std::function<bool()> &should_cancel)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- hailo_status status = HAILO_SUCCESS;
- auto wait_res = m_write_read_cv.wait_for(lock, timeout, [this, network_group_handle, stream_name, &should_cancel, &status] {
-
- if (should_cancel()) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true; // return true so that the wait will finish
- }
-
- if (should_ng_stop(network_group_handle)) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true; // return true so that the wait will finish
- }
-
- auto should_wait = should_wait_for_write(network_group_handle, stream_name);
- if (HAILO_SUCCESS != should_wait.status()) {
- status = should_wait.status();
- return true; // return true so that the wait will finish
- }
- return !should_wait.value();
- });
- CHECK(wait_res, HAILO_TIMEOUT, "{} (H2D) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return status;
- }
- CHECK_SUCCESS(status);
-
- m_cngs[network_group_handle]->mark_frame_sent();
- m_cngs[network_group_handle]->requested_write_frames().increase(stream_name);
- }
- m_write_read_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-Expected<bool> NetworkGroupScheduler::should_wait_for_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name)
-{
- auto scheduled_ng = m_cngs[network_group_handle];
-
- if (should_ng_stop(network_group_handle)) {
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
-
- auto pre_transfer_h2d_frames = scheduled_ng->requested_write_frames(stream_name) + scheduled_ng->finished_write_frames(stream_name);
- bool has_written_max_batch_size = ((scheduled_ng->use_dynamic_batch_flow() || is_multi_device()) &&
- ((scheduled_ng->get_max_batch_size() * m_devices.size()) == pre_transfer_h2d_frames));
-
- bool should_stop_writing_because_switching = ((!(scheduled_ng->use_dynamic_batch_flow() || is_multi_device())) &&
- (is_switching_current_network_group(network_group_handle) || m_changing_current_batch_size[network_group_handle]) &&
- is_network_group_active(network_group_handle) && scheduled_ng->has_input_written_most_frames(stream_name));
-
- auto total_written_frames = scheduled_ng->total_written_frames_count()[stream_name];
- auto min_finished_read = scheduled_ng->finished_read_frames_min_value();
- auto ongoing_frames = (min_finished_read < total_written_frames) ? (total_written_frames - min_finished_read) : 0;
- bool has_enough_space_for_writes = scheduled_ng->has_enough_space_in_read_buffers(ongoing_frames);
-
- if (has_written_max_batch_size || should_stop_writing_because_switching || (!has_enough_space_for_writes)) {
- return true;
- }
-
- return false;
-}
-
-hailo_status NetworkGroupScheduler::signal_write_finish(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
- auto scheduled_ng = m_cngs[network_group_handle];
-
- if (should_ng_stop(network_group_handle)) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- scheduled_ng->finished_write_frames().increase(stream_name);
- scheduled_ng->requested_write_frames().decrease(stream_name);
-
- auto device_id = NetworkGroupSchedulerOracle::get_avail_device(*this, network_group_handle);
- if (INVALID_DEVICE_ID != device_id) {
- auto status = switch_network_group(network_group_handle, device_id);
- CHECK_SUCCESS(status);
- }
-
- for (auto &device_info : m_devices) {
- if (device_info->current_network_group_handle == network_group_handle && !(scheduled_ng->use_dynamic_batch_flow() || is_multi_device())) {
- auto status = send_all_pending_buffers(network_group_handle, device_info->device_id);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
- return status;
- }
- CHECK_SUCCESS(status);
- }
- }
- }
- m_write_read_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::switch_network_group(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id, bool /*keep_nn_config*/)
-{
- auto scheduled_ng = m_cngs[network_group_handle];
- auto curr_device_info = m_devices[device_id];
-
- // initialize current cycle maps
- for (const auto &name : scheduled_ng->get_inputs_names()) {
- curr_device_info->current_cycle_requested_transferred_frames_h2d[network_group_handle][name] = 0;
- }
-
- for (const auto &name : scheduled_ng->get_outputs_names()) {
- curr_device_info->current_cycle_finished_transferred_frames_d2h[network_group_handle][name] = 0;
- curr_device_info->current_cycle_finished_read_frames_d2h[network_group_handle][name] = 0;
- }
-
- uint16_t batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE;
- uint16_t burst_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE;
- if (scheduled_ng->use_dynamic_batch_flow()) {
- burst_size = std::min(static_cast<uint16_t>(scheduled_ng->finished_write_frames_min_value()), scheduled_ng->get_max_batch_size());
- batch_size = burst_size;
- } else {
- burst_size = is_multi_device() ? static_cast<uint16_t>(scheduled_ng->finished_write_frames_min_value()) : SINGLE_CONTEXT_BATCH_SIZE;
- batch_size = SINGLE_CONTEXT_BATCH_SIZE;
- }
-
- bool has_same_batch_size_as_previous = (curr_device_info->current_batch_size == batch_size);
- curr_device_info->current_batch_size = batch_size;
- curr_device_info->current_burst_size = burst_size;
-
- m_last_measured_activation_timestamp[network_group_handle] = std::chrono::steady_clock::now();
-
- if (curr_device_info->current_network_group_handle != network_group_handle) {
- curr_device_info->is_switching_network_group = false;
- }
-
- if ((network_group_handle != curr_device_info->current_network_group_handle) || (!has_same_batch_size_as_previous)) {
- assert(m_cngs.size() > network_group_handle);
- auto next_active_cng = scheduled_ng->get_network_group();
- auto next_active_cng_wrapper = std::dynamic_pointer_cast<VDeviceNetworkGroup>(next_active_cng);
- assert(nullptr != next_active_cng_wrapper);
- auto next_active_cng_expected = next_active_cng_wrapper->get_network_group_by_device_index(curr_device_info->device_id);
- CHECK_EXPECTED_AS_STATUS(next_active_cng_expected);
-
- std::shared_ptr<VdmaConfigNetworkGroup> current_active_vdma_cng = nullptr;
- if (curr_device_info->current_network_group_handle != INVALID_NETWORK_GROUP_HANDLE) {
- reset_current_ng_timestamps(curr_device_info->device_id);
- auto current_active_cng = m_cngs[curr_device_info->current_network_group_handle]->get_network_group();
- auto current_active_cng_bundle = std::dynamic_pointer_cast<VDeviceNetworkGroup>(current_active_cng);
- assert(nullptr != current_active_cng_bundle);
- auto current_active_cng_expected = current_active_cng_bundle->get_network_group_by_device_index(curr_device_info->device_id);
- CHECK_EXPECTED_AS_STATUS(current_active_cng_expected);
- current_active_vdma_cng = current_active_cng_expected.release();
- }
-
- TRACE(SwitchNetworkGroupTrace, "", network_group_handle);
- auto status = VdmaConfigManager::switch_network_group(current_active_vdma_cng, next_active_cng_expected.value(), batch_size);
- CHECK_SUCCESS(status, "Failed switching network group");
-
- // Register to get interrupts - has to be after network group is activated
- for (auto &output_stream : next_active_cng_expected.value()->get_output_streams()) {
- OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream.get());
- status = vdevice_output.register_for_d2h_interrupts(
- [this, name = output_stream.get().name(), format = vdevice_output.get_layer_info().format.order, scheduled_ng, network_group_handle, device_id]
- (uint32_t frames) {
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
- if (hailo_format_order_t::HAILO_FORMAT_ORDER_HAILO_NMS != format) {
- TRACE(OutputVdmaEnqueueTrace, "", network_group_handle, name, frames);
- // TODO: Remove d2h_finished_transferred_frames and use current_cycle_finished_transferred_frames_d2h instead
- scheduled_ng->d2h_finished_transferred_frames(name) += frames;
- m_devices[device_id]->current_cycle_finished_transferred_frames_d2h[network_group_handle][name] += frames;
- }
- if (!(is_multi_device() || scheduled_ng->use_dynamic_batch_flow()) || has_ng_drained_everything(network_group_handle, device_id)) {
- choose_next_network_group(device_id);
- }
- }
- m_write_read_cv.notify_all();
- });
- CHECK_SUCCESS(status);
- }
- }
-
- scheduled_ng->set_last_run_timestamp(std::chrono::steady_clock::now()); // Mark timestamp on activation
- curr_device_info->current_network_group_handle = network_group_handle;
-
- // Finished switching batch size
- m_changing_current_batch_size[network_group_handle] = false;
-
- auto status = send_all_pending_buffers(network_group_handle, device_id);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::send_all_pending_buffers(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id)
-{
- auto current_device_info = m_devices[device_id];
- if ((INVALID_NETWORK_GROUP_HANDLE == current_device_info->current_network_group_handle) || (current_device_info->current_network_group_handle != network_group_handle)) {
- return HAILO_SUCCESS;
- }
-
- auto scheduled_ng = m_cngs[network_group_handle];
-
- while(true) {
- auto finished_send = false;
- for (const auto &name : scheduled_ng->get_inputs_names()) {
- if ((scheduled_ng->finished_write_frames(name) == 0) || (((scheduled_ng->use_dynamic_batch_flow()) || (is_multi_device())) &&
- ((current_device_info->current_cycle_requested_transferred_frames_h2d[network_group_handle][name] == current_device_info->current_burst_size)))) {
- finished_send = true;
- break;
- }
- }
- if (finished_send) {
- break;
- }
-
- for (const auto &name : scheduled_ng->get_inputs_names()) {
- auto status = send_pending_buffer(network_group_handle, name, device_id);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
- return status;
- }
- CHECK_SUCCESS(status);
- }
- scheduled_ng->push_device_index(device_id);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::send_pending_buffer(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- uint32_t device_id)
-{
- assert(m_cngs.size() > network_group_handle);
- auto scheduled_ng = m_cngs[network_group_handle];
-
- auto current_cng = scheduled_ng->get_network_group();
- auto input_stream = current_cng->get_input_stream_by_name(stream_name);
- CHECK_EXPECTED_AS_STATUS(input_stream);
-
- VDeviceInputStreamMultiplexerWrapper &vdevice_input = static_cast<VDeviceInputStreamMultiplexerWrapper&>(input_stream->get());
- TRACE(InputVdmaEnqueueTrace, "", network_group_handle, stream_name);
- auto status = vdevice_input.send_pending_buffer(device_id);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
- return status;
- }
- CHECK_SUCCESS(status);
-
- scheduled_ng->h2d_requested_transferred_frames().increase(stream_name);
- m_devices[device_id]->current_cycle_requested_transferred_frames_h2d[network_group_handle][stream_name]++;
- scheduled_ng->finished_write_frames().decrease(stream_name);
-
- scheduled_ng->h2d_finished_transferred_frames().increase(stream_name);
- scheduled_ng->h2d_requested_transferred_frames().decrease(stream_name);
-
- if (should_ng_stop(network_group_handle)) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- return HAILO_SUCCESS;
-}
-
-void NetworkGroupScheduler::reset_current_ng_timestamps(uint32_t device_id)
-{
- auto curr_device_info = m_devices[device_id];
- if (INVALID_NETWORK_GROUP_HANDLE == curr_device_info->current_network_group_handle) {
- return;
- }
-
- m_cngs[curr_device_info->current_network_group_handle]->set_last_run_timestamp(std::chrono::steady_clock::now()); // Mark timestamp on de-activation
-
- const auto active_duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
- std::chrono::steady_clock::now() - m_last_measured_activation_timestamp[curr_device_info->current_network_group_handle]).count();
-
- assert(contains(m_active_duration, curr_device_info->current_network_group_handle));
- m_active_duration[curr_device_info->current_network_group_handle] += active_duration_sec;
-}
-
-NetworkGroupScheduler::ReadyInfo NetworkGroupScheduler::is_network_group_ready(const scheduler_ng_handle_t &network_group_handle, bool check_threshold, uint32_t device_id)
-{
- ReadyInfo result;
- result.is_ready = false;
-
- if (should_ng_stop(network_group_handle)) {
- // Do not switch to an aborted network group
- return result;
- }
-
- auto scheduled_ng = m_cngs[network_group_handle];
- // Check if there arent any write requests
- bool has_pending_writes = scheduled_ng->finished_write_frames_min_value() > 0;
-
- // Check if there arent any read requests
- bool has_pending_user_reads = false;
- for (const auto &name : scheduled_ng->get_outputs_names()) {
- if (scheduled_ng->requested_read_frames(name) > 0) {
- has_pending_user_reads = true;
- break;
- }
- }
-
- std::vector<bool> over_threshold;
- over_threshold.reserve(scheduled_ng->get_inputs_names().size());
- std::vector<bool> over_timeout;
- over_timeout.reserve(scheduled_ng->get_inputs_names().size());
-
- if (check_threshold) {
- for (const auto &name : scheduled_ng->get_inputs_names()) {
- auto threshold_exp = scheduled_ng->get_threshold(name);
- if (!threshold_exp) {
- LOGGER__ERROR("Failed to get threshold for stream {}", name);
- return result;
- }
- auto threshold = (DEFAULT_SCHEDULER_MIN_THRESHOLD == threshold_exp.value()) ? 1 : threshold_exp.value();
- auto timeout_exp = scheduled_ng->get_timeout();
- if (!timeout_exp) {
- LOGGER__ERROR("Failed to get timeout for stream {}", name);
- return result;
- }
- auto timeout = timeout_exp.release();
-
- // Check if there arent enough write requests to reach threshold and timeout didnt passed
- auto write_requests = scheduled_ng->requested_write_frames(name) + scheduled_ng->finished_write_frames(name);
- auto stream_over_threshold = write_requests >= threshold;
- auto stream_over_timeout = timeout <= (std::chrono::steady_clock::now() - scheduled_ng->get_last_run_timestamp());
- over_threshold.push_back(stream_over_threshold);
- over_timeout.push_back(stream_over_timeout);
- if (stream_over_threshold || stream_over_timeout) {
- continue;
- } else {
- result.is_ready = false;
- return result;
- }
- }
- }
-
- auto has_pending_vdma_frames = get_max_value_of_unordered_map(m_devices[device_id]->current_cycle_requested_transferred_frames_h2d[network_group_handle]) !=
- get_min_value_of_unordered_map(m_devices[device_id]->current_cycle_finished_read_frames_d2h[network_group_handle]);
-
- result.threshold = std::all_of(over_threshold.begin(), over_threshold.end(), [](auto over) { return over; });
- result.timeout = std::all_of(over_timeout.begin(), over_timeout.end(), [](auto over) { return over; });
- result.is_ready = has_pending_writes && has_pending_user_reads && (!has_pending_vdma_frames);
-
- return result;
-}
-
-Expected<uint32_t> NetworkGroupScheduler::wait_for_read(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout)
-{
- uint32_t device_id = INVALID_DEVICE_ID;
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- auto scheduled_ng = m_cngs[network_group_handle];
-
- scheduled_ng->requested_read_frames().increase(stream_name);
-
- hailo_status status = HAILO_SUCCESS;
- auto wait_res = m_write_read_cv.wait_for(lock, timeout, [this, network_group_handle, scheduled_ng, stream_name, &status] {
-
- if (should_ng_stop(network_group_handle)) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true; // return true so that the wait will finish
- }
-
- auto device_id = NetworkGroupSchedulerOracle::get_avail_device(*this, network_group_handle);
- if (INVALID_DEVICE_ID != device_id) {
- status = switch_network_group(network_group_handle, device_id);
- if (HAILO_SUCCESS != status) {
- return true; // return true so that the wait will finish
- }
- }
-
- return scheduled_ng->can_stream_read(stream_name);
- });
- CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "{} (D2H) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- scheduled_ng->ongoing_read_frames().increase(stream_name);
- scheduled_ng->requested_read_frames().decrease(stream_name);
- device_id = scheduled_ng->pop_device_index(stream_name);
- }
- m_write_read_cv.notify_all();
-
- return device_id;
-}
-
-
-hailo_status NetworkGroupScheduler::signal_read_finish(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name, uint32_t device_id)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- auto scheduled_ng = m_cngs[network_group_handle];
-
- scheduled_ng->finished_read_frames().increase(stream_name);
- m_devices[device_id]->current_cycle_finished_read_frames_d2h[network_group_handle][stream_name]++;
- scheduled_ng->d2h_finished_transferred_frames().decrease(stream_name);
- scheduled_ng->ongoing_read_frames().decrease(stream_name);
- m_fps_accumulator[network_group_handle]++;
-
- decrease_ng_counters(network_group_handle);
- }
- m_write_read_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-
-bool NetworkGroupScheduler::has_ng_finished(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id)
-{
- if (INVALID_NETWORK_GROUP_HANDLE == network_group_handle) {
- return true; // If no network group is running, consider it as finished
- }
-
- auto scheduled_ng = m_cngs[network_group_handle];
-
- if (scheduled_ng->use_dynamic_batch_flow() || is_multi_device()) {
- for (const auto &name : scheduled_ng->get_outputs_names()) {
- if (m_devices[device_id]->current_cycle_finished_read_frames_d2h[network_group_handle][name] < m_devices[device_id]->current_batch_size) {
- return false;
- }
- }
-
- return true;
- }
-
- uint32_t written_frames = get_max_value_of_unordered_map(scheduled_ng->total_written_frames_count());
- for (const auto &name : scheduled_ng->get_outputs_names()) {
- if (scheduled_ng->finished_read_frames(name) < written_frames) {
- return false;
- }
- }
- return true;
-}
-
-void NetworkGroupScheduler::decrease_ng_counters(const scheduler_ng_handle_t &network_group_handle)
-{
- return m_cngs[network_group_handle]->decrease_current_ng_counters();
-}
-
-bool NetworkGroupScheduler::has_ng_drained_everything(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id)
-{
- if (INVALID_NETWORK_GROUP_HANDLE == network_group_handle) {
- // If no network group is running, consider it as drained
- return true;
- }
-
- if (ng_all_streams_aborted(network_group_handle)) {
- // We treat NG as drained only if all streams are aborted - to make sure there aren't any ongoing transfers
- return true;
- }
-
- if ((!m_cngs[network_group_handle]->is_nms()) && (is_multi_device() || m_cngs[network_group_handle]->use_dynamic_batch_flow())) {
- auto current_device_info = m_devices[device_id];
- auto max_transferred_h2d = get_max_value_of_unordered_map(current_device_info->current_cycle_requested_transferred_frames_h2d[network_group_handle]);
- auto min_transferred_d2h = get_min_value_of_unordered_map(current_device_info->current_cycle_finished_transferred_frames_d2h[network_group_handle]);
-
- return (max_transferred_h2d == min_transferred_d2h);
- }
-
- return m_cngs[network_group_handle]->has_ng_drained_everything(!(m_cngs[network_group_handle]->use_dynamic_batch_flow() || is_multi_device()));
-}
-
-hailo_status NetworkGroupScheduler::enable_stream(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- if (!m_should_ng_stop[network_group_handle][stream_name]) {
- return HAILO_SUCCESS;
- }
-
- m_should_ng_stop[network_group_handle][stream_name] = false;
- }
- m_write_read_cv.notify_all();
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::disable_stream(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
- if (m_should_ng_stop[network_group_handle][stream_name]) {
- return HAILO_SUCCESS;
- }
-
- m_should_ng_stop[network_group_handle][stream_name] = true;
- }
- m_write_read_cv.notify_all();
- return HAILO_SUCCESS;
-}
-
-hailo_status NetworkGroupScheduler::set_timeout(const scheduler_ng_handle_t &network_group_handle, const std::chrono::milliseconds &timeout, const std::string &/*network_name*/)
-{
- // TODO: call in loop for set_timeout with the relevant stream-names (of the given network)
- return m_cngs[network_group_handle]->set_timeout(timeout);
-}
-
-hailo_status NetworkGroupScheduler::set_threshold(const scheduler_ng_handle_t &network_group_handle, uint32_t threshold, const std::string &/*network_name*/)
-{
- // TODO: call in loop for set_timeout with the relevant stream-names (of the given network)
- return m_cngs[network_group_handle]->set_threshold(threshold);
-}
-
-void NetworkGroupScheduler::choose_next_network_group(size_t device_id)
-{
- if (!m_devices[device_id]->is_switching_network_group) {
- NetworkGroupSchedulerOracle::choose_next_model(*this, m_devices[device_id]->device_id);
- }
-}
-
-bool NetworkGroupScheduler::should_ng_stop(const scheduler_ng_handle_t &network_group_handle)
-{
- for (const auto &name_flag_pair : m_should_ng_stop[network_group_handle]) {
- if (name_flag_pair.second) {
- return true;
- }
- }
-
- return false;
-}
-
-bool NetworkGroupScheduler::ng_all_streams_aborted(const scheduler_ng_handle_t &network_group_handle)
-{
- for (const auto &name_flag_pair : m_should_ng_stop[network_group_handle]) {
- if (!name_flag_pair.second) {
- return false;
- }
- }
- return true;
-}
-
-void NetworkGroupScheduler::notify_all()
-{
- {
- // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
- }
- m_write_read_cv.notify_all();
-}
-
-void NetworkGroupScheduler::mark_failed_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name)
-{
- {
- std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
- assert(m_cngs.size() > network_group_handle);
- m_cngs[network_group_handle]->requested_write_frames().decrease(stream_name);
- }
- m_write_read_cv.notify_all();
-}
-
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_scheduler.hpp
- * @brief Class declaration for NetworkGroupScheduler that schedules network groups to be active depending on the scheduling algorithm.
- **/
-
-#ifndef _HAILO_NETWORK_GROUP_SCHEDULER_HPP_
-#define _HAILO_NETWORK_GROUP_SCHEDULER_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/network_group.hpp"
-#include "common/utils.hpp"
-#include "common/filesystem.hpp"
-#include "scheduler_mon.hpp"
-#include "scheduled_network_group.hpp"
-
-#include <condition_variable>
-
-#define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
-#define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
-
-namespace hailort
-{
-
-#define INVALID_NETWORK_GROUP_HANDLE (UINT32_MAX)
-#define INVALID_DEVICE_ID (UINT32_MAX)
-
-using scheduler_ng_handle_t = uint32_t;
-
-class NetworkGroupScheduler;
-using NetworkGroupSchedulerPtr = std::shared_ptr<NetworkGroupScheduler>;
-
-// We use mostly weak pointer for the scheduler to prevent circular dependency of the pointers
-using NetworkGroupSchedulerWeakPtr = std::weak_ptr<NetworkGroupScheduler>;
-
-using stream_name_t = std::string;
-
-struct ActiveDeviceInfo {
- ActiveDeviceInfo(uint32_t device_id) : current_network_group_handle(INVALID_NETWORK_GROUP_HANDLE),
- next_network_group_handle(INVALID_NETWORK_GROUP_HANDLE), is_switching_network_group(false), current_batch_size(0), current_burst_size(0),
- current_cycle_requested_transferred_frames_h2d(), current_cycle_finished_transferred_frames_d2h(), current_cycle_finished_read_frames_d2h(),
- device_id(device_id)
- {}
- scheduler_ng_handle_t current_network_group_handle;
- scheduler_ng_handle_t next_network_group_handle;
- std::atomic_bool is_switching_network_group;
- std::atomic_uint32_t current_batch_size;
- std::atomic_uint32_t current_burst_size;
- std::unordered_map<scheduler_ng_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_requested_transferred_frames_h2d;
- std::unordered_map<scheduler_ng_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_finished_transferred_frames_d2h;
- std::unordered_map<scheduler_ng_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_finished_read_frames_d2h;
- uint32_t device_id;
-};
-
-class NetworkGroupScheduler
-{
-public:
- static Expected<NetworkGroupSchedulerPtr> create_round_robin(uint32_t device_count);
- NetworkGroupScheduler(hailo_scheduling_algorithm_t algorithm, uint32_t device_count);
-
- virtual ~NetworkGroupScheduler();
- NetworkGroupScheduler(const NetworkGroupScheduler &other) = delete;
- NetworkGroupScheduler &operator=(const NetworkGroupScheduler &other) = delete;
- NetworkGroupScheduler &operator=(NetworkGroupScheduler &&other) = delete;
- NetworkGroupScheduler(NetworkGroupScheduler &&other) noexcept = delete;
-
- hailo_scheduling_algorithm_t algorithm()
- {
- return m_algorithm;
- }
-
- Expected<scheduler_ng_handle_t> add_network_group(std::shared_ptr<ConfiguredNetworkGroup> added_cng);
-
- hailo_status wait_for_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout, const std::function<bool()> &should_cancel);
- hailo_status signal_write_finish(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name);
- Expected<uint32_t> wait_for_read(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout);
- hailo_status signal_read_finish(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name, uint32_t device_id);
-
- hailo_status enable_stream(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name);
- hailo_status disable_stream(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name);
-
- hailo_status set_timeout(const scheduler_ng_handle_t &network_group_handle, const std::chrono::milliseconds &timeout, const std::string &network_name);
- hailo_status set_threshold(const scheduler_ng_handle_t &network_group_handle, uint32_t threshold, const std::string &network_name);
-
- void notify_all();
- void mark_failed_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name);
-
-protected:
- struct ReadyInfo {
- bool threshold = false;
- bool timeout = false;
- bool is_ready = false;
- };
-
- void choose_next_network_group(size_t device_id);
- ReadyInfo is_network_group_ready(const scheduler_ng_handle_t &network_group_handle, bool check_threshold, uint32_t device_id);
-
- std::vector<std::shared_ptr<ActiveDeviceInfo>> m_devices;
- std::unordered_map<scheduler_ng_handle_t, std::atomic_bool> m_changing_current_batch_size;
- std::unordered_map<scheduler_ng_handle_t, std::map<stream_name_t, std::atomic_bool>> m_should_ng_stop;
-
- std::vector<std::shared_ptr<ScheduledNetworkGroup>> m_cngs;
-
-private:
- hailo_status switch_network_group(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id,
- bool keep_nn_config = false);
- void reset_current_ng_timestamps(uint32_t device_id);
-
- Expected<bool> should_wait_for_write(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name);
- hailo_status send_all_pending_buffers(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id);
- hailo_status send_pending_buffer(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name, uint32_t device_id);
-
- void decrease_ng_counters(const scheduler_ng_handle_t &network_group_handle);
- bool has_ng_drained_everything(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id);
- bool has_ng_finished(const scheduler_ng_handle_t &network_group_handle, uint32_t device_id);
- bool should_ng_stop(const scheduler_ng_handle_t &network_group_handle);
- bool ng_all_streams_aborted(const scheduler_ng_handle_t &network_group_handle);
-
- std::string get_network_group_name(const scheduler_ng_handle_t &network_group_handle);
- bool is_network_group_active(const scheduler_ng_handle_t &network_group_handle);
- bool is_switching_current_network_group(const scheduler_ng_handle_t &network_group_handle);
- bool is_multi_device();
-
- hailo_status start_mon();
- void log_monitor_networks_infos(ProtoMon &mon);
- void log_monitor_frames_infos(ProtoMon &mon);
- hailo_status set_h2d_frames_counters(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- ProtoMonStreamFramesInfo &stream_frames_info);
- hailo_status set_d2h_frames_counters(const scheduler_ng_handle_t &network_group_handle, const std::string &stream_name,
- ProtoMonStreamFramesInfo &stream_frames_info);
-#if defined(__GNUC__)
- Expected<std::shared_ptr<TempFile>> open_temp_mon_file();
- void dump_state();
-#endif
-
- hailo_scheduling_algorithm_t m_algorithm;
- std::mutex m_before_read_write_mutex;
- std::condition_variable m_write_read_cv;
- scheduler_ng_handle_t m_last_choosen_network_group;
-
- // Params for the scheduler MON
- std::atomic_bool m_should_monitor;
- std::thread m_mon_thread;
- EventPtr m_mon_shutdown_event;
-#if defined(__GNUC__)
- std::shared_ptr<TempFile> m_mon_tmp_output;
-#endif
- std::chrono::time_point<std::chrono::steady_clock> m_last_measured_timestamp;
- std::unordered_map<scheduler_ng_handle_t, std::chrono::time_point<std::chrono::steady_clock>> m_last_measured_activation_timestamp;
- // TODO: Consider adding Accumulator classes for more info (min, max, mean, etc..)
- std::unordered_map<scheduler_ng_handle_t, double> m_active_duration;
- std::unordered_map<scheduler_ng_handle_t, std::atomic_uint32_t> m_fps_accumulator;
-
- friend class NetworkGroupSchedulerOracle;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_NETWORK_GROUP_SCHEDULER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_rate_calculator.cpp
- * @brief: Network rate calculator
- **/
-
-#include "hailo/network_rate_calculator.hpp"
-#include "hailo/hailort.h"
-
-#include "eth_stream.hpp"
-#include "common/utils.hpp"
-#include <numeric>
-#include <algorithm>
-
-namespace hailort
-{
-
-Expected<StreamInfoVector> NetworkUdpRateCalculator::get_streams_from_hef(Hef* hef, const std::string &network_group_name)
-{
- assert(nullptr != hef);
-
- auto all_streams_infos = hef->get_all_stream_infos(network_group_name);
- CHECK_EXPECTED(all_streams_infos);
-
- // We expect to have two or more streams (atleast one for input and one for output)
- if (all_streams_infos->size() < 2) {
- return make_unexpected(HAILO_INVALID_HEF);
- }
-
- return all_streams_infos;
-}
-
-NetworkUdpRateCalculator::NetworkUdpRateCalculator(std::map<std::string, uint32_t> &&input_edge_shapes,
- std::map<std::string, uint32_t> &&output_edge_shapes) :
- m_input_edge_shapes(std::move(input_edge_shapes)),
- m_output_edge_shapes(std::move(output_edge_shapes)) {}
-
-Expected<NetworkUdpRateCalculator> NetworkUdpRateCalculator::create(Hef* hef, const std::string &network_group_name)
-{
- if (hef == nullptr) {
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
- const auto stream_infos = get_streams_from_hef(hef, network_group_name);
- if (!stream_infos) {
- return make_unexpected(stream_infos.status());
- }
-
- // Working with HEF for rate_calcs assums that all streams are udp streams
- std::map<std::string, uint32_t> input_udp_edge_shapes;
- std::map<std::string, uint32_t> output_udp_edge_shapes;
- for (auto &info : stream_infos.value()) {
- if (HAILO_H2D_STREAM == info.direction) {
- input_udp_edge_shapes.insert(std::make_pair(info.name, info.hw_frame_size));
- } else if (HAILO_D2H_STREAM == info.direction) {
- output_udp_edge_shapes.insert(std::make_pair(info.name, info.hw_frame_size));
- } else {
- LOGGER__ERROR("Invalid stream direction for stream {}.", info.name);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
- }
-
- return NetworkUdpRateCalculator(std::move(input_udp_edge_shapes), std::move(output_udp_edge_shapes));
-}
-
-Expected<NetworkUdpRateCalculator> NetworkUdpRateCalculator::create(ConfiguredNetworkGroup &net_group)
-{
- auto udp_input_streams = net_group.get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
- CHECK_AS_EXPECTED(!udp_input_streams.empty(), HAILO_INVALID_OPERATION,
- "There are no udp input streams in this network_group.");
- auto udp_output_streams = net_group.get_output_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
-
- std::map<std::string, uint32_t> input_udp_edge_shapes;
- for (const auto &stream : udp_input_streams) {
- input_udp_edge_shapes.insert(std::make_pair(stream.get().name(),
- stream.get().get_info().hw_frame_size));
- }
- std::map<std::string, uint32_t> output_udp_edge_shapes;
- for (const auto &stream : udp_output_streams) {
- output_udp_edge_shapes.insert(std::make_pair(stream.get().name(),
- stream.get().get_info().hw_frame_size));
- }
-
- return NetworkUdpRateCalculator(std::move(input_udp_edge_shapes), std::move(output_udp_edge_shapes));
-}
-
-Expected<std::map<std::string, uint32_t>> NetworkUdpRateCalculator::calculate_inputs_bandwith(uint32_t fps,
- uint32_t max_supported_bandwidth)
-{
- if (1 > fps) {
- fps = 1;
- LOGGER__WARNING("FPS for rate calculations cannot be smaller than 1. calculating rate_limiter with fps=1.");
- }
-
- std::map<std::string, uint32_t> input_rates;
- std::transform(m_input_edge_shapes.begin(), m_input_edge_shapes.end(), std::inserter(input_rates, input_rates.end()),
- [fps](auto &input_edge_pair) { return std::make_pair(input_edge_pair.first, (fps * input_edge_pair.second)); });
-
- std::map<std::string, uint32_t> output_rates = {};
- std::transform(m_output_edge_shapes.begin(), m_output_edge_shapes.end(), std::inserter(output_rates, output_rates.end()),
- [fps](auto &output_edge_pair) { return std::make_pair(output_edge_pair.first, (fps * output_edge_pair.second)); });
-
- uint32_t total_input_rate = std::accumulate(input_rates.begin(), input_rates.end(), 0,
- [](int value, const auto &p) { return value + p.second; });
- uint32_t total_output_rate = std::accumulate(output_rates.begin(), output_rates.end(), 0,
- [](int value, const auto &p) { return value + p.second; });
-
- if ((total_input_rate > max_supported_bandwidth) || (total_output_rate > max_supported_bandwidth)) {
- LOGGER__WARNING("Requested rate (input: {} Bps, output: {} Bps) is high and might be unstable. Setting rate to {}.",
- total_input_rate, total_output_rate, max_supported_bandwidth);
- if (total_output_rate > total_input_rate) {
- // Output is bigger than max rate. Adjusting input rate accordingly
- auto input_output_ratio = (total_input_rate / total_output_rate);
- LOGGER__WARNING("Output Bps ({}) is bigger than input Bps ({}) output (ratio is: {})", total_output_rate,
- total_input_rate, input_output_ratio);
- max_supported_bandwidth *= input_output_ratio;
- }
- auto total_inputs_rate_to_max_supported_ratio = (static_cast<float64_t>(max_supported_bandwidth) / total_input_rate);
- for (auto &rate_pair : input_rates) {
- auto rate = rate_pair.second * total_inputs_rate_to_max_supported_ratio;
- rate_pair.second = static_cast<uint32_t>(rate);
- }
- }
-
- return input_rates;
-}
-
-Expected<std::map<uint16_t, uint32_t>> NetworkUdpRateCalculator::get_udp_ports_rates_dict(
- std::vector<std::reference_wrapper<InputStream>> &udp_input_streams, uint32_t fps, uint32_t max_supported_bandwidth)
-{
- auto rates_per_name = calculate_inputs_bandwith(fps, max_supported_bandwidth);
- CHECK_EXPECTED(rates_per_name);
-
- std::map<uint16_t, uint32_t> results = {};
- for (const auto &input_stream : udp_input_streams) {
- uint16_t remote_port = 0;
- remote_port = reinterpret_cast<EthernetInputStream*>(&(input_stream.get()))->get_remote_port();
- results.insert(std::make_pair(remote_port,
- rates_per_name->at(input_stream.get().name())));
- }
-
- return results;
-}
-
-} /* namespace hailort */
#include "hailo/hailort.h"
#include "hailo/expected.hpp"
-#include "os/file_descriptor.hpp"
-#include "vdma/channel_id.hpp"
+
#include "common/utils.hpp"
-#include "hailo_ioctl_common.h"
+
+#include "os/file_descriptor.hpp"
+#include "vdma/channel/channel_id.hpp"
#include <mutex>
#include <thread>
#include <chrono>
#include <utility>
+#include <array>
#ifdef __QNX__
#include <sys/mman.h>
#endif // __QNX__
+
namespace hailort
{
#define PCIE_EXPECTED_MD5_LENGTH (16)
constexpr size_t VDMA_CHANNELS_PER_ENGINE = 32;
+constexpr size_t MAX_VDMA_ENGINES_COUNT = 3;
+constexpr size_t MAX_VDMA_CHANNELS_COUNT = MAX_VDMA_ENGINES_COUNT * VDMA_CHANNELS_PER_ENGINE;
constexpr uint8_t MIN_H2D_CHANNEL_INDEX = 0;
constexpr uint8_t MAX_H2D_CHANNEL_INDEX = 15;
constexpr uint8_t MIN_D2H_CHANNEL_INDEX = MAX_H2D_CHANNEL_INDEX + 1;
size_t count;
};
+struct ChannelIrqData {
+ vdma::ChannelId channel_id;
+ bool is_active;
+ uint16_t desc_num_processed;
+ uint8_t host_error;
+ uint8_t device_error;
+};
+
+struct IrqData {
+ uint8_t channels_count;
+ std::array<ChannelIrqData, MAX_VDMA_CHANNELS_COUNT> channels_irq_data;
+};
+
+// Bitmap per engine
+using ChannelsBitmap = std::array<uint32_t, MAX_VDMA_ENGINES_COUNT>;
+
#if defined(__linux__) || defined(_MSC_VER)
using vdma_mapped_buffer_driver_identifier = uintptr_t;
#elif defined(__QNX__)
};
using VdmaBufferHandle = size_t;
- using VdmaChannelHandle = uint64_t;
static Expected<HailoRTDriver> create(const std::string &dev_path);
hailo_status write_vdma_channel_register(vdma::ChannelId channel_id, DmaDirection data_direction, size_t offset,
size_t reg_size, uint32_t data);
- hailo_status vdma_buffer_sync(VdmaBufferHandle buffer, DmaDirection sync_direction, void *address, size_t buffer_size);
+ hailo_status vdma_buffer_sync(VdmaBufferHandle buffer, DmaDirection sync_direction, size_t offset, size_t count);
- Expected<VdmaChannelHandle> vdma_channel_enable(vdma::ChannelId channel_id, DmaDirection data_direction,
- bool enable_timestamps_measure);
- hailo_status vdma_channel_disable(vdma::ChannelId channel_index, VdmaChannelHandle channel_handle);
- Expected<ChannelInterruptTimestampList> wait_channel_interrupts(vdma::ChannelId channel_id,
- VdmaChannelHandle channel_handle, const std::chrono::milliseconds &timeout);
- hailo_status vdma_channel_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle);
- hailo_status vdma_channel_clear_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle);
+ hailo_status vdma_interrupts_enable(const ChannelsBitmap &channels_bitmap, bool enable_timestamps_measure);
+ hailo_status vdma_interrupts_disable(const ChannelsBitmap &channel_id);
+ Expected<IrqData> vdma_interrupts_wait(const ChannelsBitmap &channels_bitmap);
+ Expected<ChannelInterruptTimestampList> vdma_interrupts_read_timestamps(vdma::ChannelId channel_id);
Expected<std::vector<uint8_t>> read_notification();
hailo_status disable_notifications();
* of user allocated buffer
*/
Expected<VdmaBufferHandle> vdma_buffer_map(void *user_address, size_t required_size, DmaDirection data_direction,
- vdma_mapped_buffer_driver_identifier &driver_buff_handle);
+ const vdma_mapped_buffer_driver_identifier &driver_buff_handle);
/**
* Unmaps user buffer mapped using HailoRTDriver::map_buffer.
* Configure vdma channel descriptors to point to the given user address.
*/
hailo_status descriptors_list_bind_vdma_buffer(uintptr_t desc_handle, VdmaBufferHandle buffer_handle,
- uint16_t desc_page_size, uint8_t channel_index, size_t offset);
+ uint16_t desc_page_size, uint8_t channel_index, uint32_t starting_desc);
Expected<uintptr_t> vdma_low_memory_buffer_alloc(size_t size);
hailo_status vdma_low_memory_buffer_free(uintptr_t buffer_handle);
HailoRTDriver(HailoRTDriver &&other) noexcept = default;
HailoRTDriver &operator=(HailoRTDriver &&other) = default;
- static const VdmaChannelHandle INVALID_VDMA_CHANNEL_HANDLE;
- static const uintptr_t INVALID_DRIVER_BUFFER_HANDLE_VALUE;
- static const uint8_t INVALID_VDMA_CHANNEL_INDEX;
+ static const uintptr_t INVALID_DRIVER_BUFFER_HANDLE_VALUE;
+ static const size_t INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE;
+ static const uint8_t INVALID_VDMA_CHANNEL_INDEX;
private:
hailo_status read_memory_ioctl(MemoryType memory_type, uint64_t address, void *buf, size_t size);
HailoRTDriver(const std::string &dev_path, FileDescriptor &&fd, hailo_status &status);
bool is_valid_channel_id(const vdma::ChannelId &channel_id);
+ bool is_valid_channels_bitmap(const ChannelsBitmap &bitmap)
+ {
+ for (size_t engine_index = m_dma_engines_count; engine_index < MAX_VDMA_ENGINES_COUNT; engine_index++) {
+ if (bitmap[engine_index]) {
+ LOGGER__ERROR("Engine {} does not exist on device (engines count {})", engine_index,
+ m_dma_engines_count);
+ return false;
+ }
+ }
+ return true;
+ }
FileDescriptor m_fd;
std::string m_dev_path;
* @file mmap_buffer.hpp
* @brief RAII wrapper around memory mapping (mmap)
*
- *
+ *
**/
#ifndef _OS_MMAP_BUFFER_H_
class MmapBufferImpl final {
public:
-
static Expected<MmapBufferImpl> create_shared_memory(size_t length);
static Expected<MmapBufferImpl> create_file_map(size_t length, FileDescriptor &file, uintptr_t offset);
MmapBufferImpl(const MmapBufferImpl &other) = delete;
MmapBufferImpl &operator=(const MmapBufferImpl &other) = delete;
- MmapBufferImpl(MmapBufferImpl &&other) noexcept :
- m_address(std::exchange(other.m_address, INVALID_ADDR)),
+ MmapBufferImpl(MmapBufferImpl &&other) noexcept :
+ m_address(std::exchange(other.m_address, INVALID_ADDR)),
m_length(std::move(other.m_length)) {};
- MmapBufferImpl &operator=(MmapBufferImpl &&other) noexcept
+ MmapBufferImpl &operator=(MmapBufferImpl &&other) noexcept
{
std::swap(m_address, other.m_address);
std::swap(m_length, other.m_length);
return *this;
};
- void *get() {
+ void *address() {
return m_address;
}
- explicit operator bool() const
+ bool is_mapped() const
{
return (INVALID_ADDR != m_address);
}
void *m_address;
size_t m_length;
- bool m_unmappable;
+ bool m_unmappable;
};
template<typename T>
return MmapBuffer<T>(std::move(mmap.release()));
}
-
MmapBuffer() = default;
~MmapBuffer() = default;
T* operator->()
{
- return get();
+ return address();
}
- T* get() {
- return reinterpret_cast<T*>(m_mmap.get());
+ T* address() {
+ return reinterpret_cast<T*>(m_mmap.address());
}
-
template<typename U=T>
std::enable_if_t<!std::is_void<U>::value, U&> operator*()
{
- return get()[0];
+ return address()[0];
}
template<typename U=T>
std::enable_if_t<!std::is_void<U>::value, U&> operator[](size_t i)
{
- return get()[i];
+ return address()[i];
}
- explicit operator bool() const
+ bool is_mapped() const
{
- return bool(m_mmap);
+ return m_mmap.is_mapped();
}
// 'munmap' the current mapped buffer (if currently mapped).
{
static_assert(VDMA_CHANNELS_PER_ENGINE == MAX_VDMA_CHANNELS_PER_ENGINE, "Driver and libhailort parameters mismatch");
+static_assert(MAX_VDMA_ENGINES == MAX_VDMA_ENGINES_COUNT, "Driver and libhailort parameters mismatch");
static_assert(MIN_D2H_CHANNEL_INDEX == VDMA_DEST_CHANNELS_START, "Driver and libhailort parameters mismatch");
static hailo_dma_data_direction direction_to_dma_data_direction(HailoRTDriver::DmaDirection direction) {
return HAILO_TRANSFER_MEMORY_MAX_ENUM;
}
-static Expected<ChannelInterruptTimestampList> create_interrupt_timestamp_list(hailo_vdma_channel_wait_params &inter_data)
+static Expected<ChannelInterruptTimestampList> create_interrupt_timestamp_list(
+ hailo_vdma_interrupts_read_timestamp_params &inter_data)
{
- CHECK_AS_EXPECTED(inter_data.timestamps_count <= MAX_IRQ_TIMESTAMPS_SIZE, HAILO_PCIE_DRIVER_FAIL,
- "Invalid channel interrupt timestamps count returned {}", inter_data.timestamps_count);
- ChannelInterruptTimestampList timestamp_list;
+ CHECK_AS_EXPECTED(inter_data.timestamps_count <= MAX_IRQ_TIMESTAMPS_SIZE, HAILO_DRIVER_FAIL,
+ "Invalid channel interrupts timestamps count returned {}", inter_data.timestamps_count);
+ ChannelInterruptTimestampList timestamp_list{};
timestamp_list.count = inter_data.timestamps_count;
for (size_t i = 0; i < timestamp_list.count; i++) {
return timestamp_list;
}
-const HailoRTDriver::VdmaChannelHandle HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE = INVALID_CHANNEL_HANDLE_VALUE;
+// TODO: validate wraparounds for buffer/mapping handles in the driver (HRT-9509)
const uintptr_t HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE = INVALID_DRIVER_HANDLE_VALUE;
+const size_t HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE = INVALID_DRIVER_HANDLE_VALUE;
const uint8_t HailoRTDriver::INVALID_VDMA_CHANNEL_INDEX = INVALID_VDMA_CHANNEL;
Expected<HailoRTDriver> HailoRTDriver::create(const std::string &dev_path)
#else
#error "unsupported platform!"
#endif // __linux__
- switch (error_status) {
- case ETIMEDOUT:
- return HAILO_TIMEOUT;
- case ECONNABORTED:
- return HAILO_STREAM_ABORTED_BY_USER;
- case ECONNRESET:
- return HAILO_STREAM_NOT_ACTIVATED;
- default:
- return HAILO_PCIE_DRIVER_FAIL;
- }
+
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
break;
default:
LOGGER__ERROR("Invalid dma type returned from ioctl {}", device_properties.dma_type);
- status = HAILO_PCIE_DRIVER_FAIL;
+ status = HAILO_DRIVER_FAIL;
return;
}
int err = 0;
auto status = hailo_ioctl(this->m_fd, HAILO_READ_NOTIFICATION, ¬ification_buffer, err);
if (HAILO_SUCCESS != status) {
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
std::vector<uint8_t> notification(notification_buffer.buffer_len);
auto status = hailo_ioctl(this->m_fd, HAILO_DISABLE_NOTIFICATION, 0, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HAILO_DISABLE_NOTIFICATION failed with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
auto status = hailo_ioctl(m_fd, HAILO_VDMA_CHANNEL_READ_REGISTER, ¶ms, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HailoRTDriver::read_vdma_channel_register failed with errno:{}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::move(params.data);
auto status = hailo_ioctl(m_fd, HAILO_VDMA_CHANNEL_WRITE_REGISTER, ¶ms, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HailoRTDriver::write_vdma_channel_register failed with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
auto status = hailo_ioctl(this->m_fd, HAILO_MEMORY_TRANSFER, &transfer, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HailoRTDriver::read_memory failed with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
memcpy(buf, transfer.buffer, transfer.count);
auto status = hailo_ioctl(this->m_fd, HAILO_MEMORY_TRANSFER, &transfer, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HailoRTDriver::write_memory failed with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-hailo_status HailoRTDriver::vdma_buffer_sync(VdmaBufferHandle handle, DmaDirection sync_direction, void *address,
- size_t buffer_size)
+hailo_status HailoRTDriver::vdma_buffer_sync(VdmaBufferHandle handle, DmaDirection sync_direction, size_t offset, size_t count)
{
#if defined(__linux__)
CHECK(sync_direction != DmaDirection::BOTH, HAILO_INVALID_ARGUMENT, "Can't sync vdma data both host and device");
hailo_vdma_buffer_sync_params sync_info{
.handle = handle,
.sync_type = (sync_direction == DmaDirection::H2D) ? HAILO_SYNC_FOR_DEVICE : HAILO_SYNC_FOR_HOST,
- .buffer_address = address,
- .buffer_size = buffer_size
+ .offset = offset,
+ .count = count
};
int err = 0;
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_BUFFER_SYNC, &sync_info, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HAILO_VDMA_BUFFER_SYNC failed with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
// TODO: HRT-6717 - Remove ifdef when Implement sync ioctl (if determined needed in qnx)
#elif defined( __QNX__)
(void) handle;
(void) sync_direction;
- (void) address;
- (void) buffer_size;
+ (void) offset;
+ (void) count;
return HAILO_SUCCESS;
#else
#error "unsupported platform!"
#endif // __linux__
}
-
-Expected<HailoRTDriver::VdmaChannelHandle> HailoRTDriver::vdma_channel_enable(vdma::ChannelId channel_id,
- DmaDirection data_direction, bool enable_timestamps_measure)
+hailo_status HailoRTDriver::vdma_interrupts_enable(const ChannelsBitmap &channels_bitmap, bool enable_timestamps_measure)
{
- CHECK_AS_EXPECTED(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- CHECK_AS_EXPECTED(data_direction != DmaDirection::BOTH, HAILO_INVALID_ARGUMENT, "Invalid direction given");
- hailo_vdma_channel_enable_params params {
- .engine_index = channel_id.engine_index,
- .channel_index = channel_id.channel_index,
- .direction = direction_to_dma_data_direction(data_direction),
- .enable_timestamps_measure = enable_timestamps_measure,
- .channel_handle = INVALID_CHANNEL_HANDLE_VALUE,
- };
+ CHECK(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
+ hailo_vdma_interrupts_enable_params params{};
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
+ params.enable_timestamps_measure = enable_timestamps_measure;
int err = 0;
- auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CHANNEL_ENABLE, ¶ms, err);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to enable interrupt for channel {} with errno:{}", channel_id, err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
- }
+ auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_ENABLE, ¶ms, err);
+ CHECK_SUCCESS(status, "Failed to enable vdma interrupts with errno:{}", err);
- return VdmaChannelHandle(params.channel_handle);
+ return HAILO_SUCCESS;
}
-hailo_status HailoRTDriver::vdma_channel_disable(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
+hailo_status HailoRTDriver::vdma_interrupts_disable(const ChannelsBitmap &channels_bitmap)
{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- hailo_vdma_channel_disable_params params {
- .engine_index = channel_id.engine_index,
- .channel_index = channel_id.channel_index,
- .channel_handle = channel_handle
- };
+ CHECK(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
+ hailo_vdma_interrupts_disable_params params{};
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
int err = 0;
- auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CHANNEL_DISABLE, ¶ms, err);
+ auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_DISABLE, ¶ms, err);
if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to disable interrupt for channel {} with errno:{}", channel_id, err);
- return HAILO_PCIE_DRIVER_FAIL;
+ LOGGER__ERROR("Failed to disable vdma interrupts with errno:{}", err);
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-Expected<ChannelInterruptTimestampList> HailoRTDriver::wait_channel_interrupts(vdma::ChannelId channel_id,
- VdmaChannelHandle channel_handle, const std::chrono::milliseconds &timeout)
+static Expected<IrqData> to_irq_data(const hailo_vdma_interrupts_wait_params& params,
+ uint8_t engines_count)
{
- CHECK_AS_EXPECTED(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- CHECK_AS_EXPECTED(timeout.count() >= 0, HAILO_INVALID_ARGUMENT);
+ static_assert(ARRAY_ENTRIES(IrqData::channels_irq_data) == ARRAY_ENTRIES(params.irq_data), "Mismatch irq data size");
+ CHECK_AS_EXPECTED(params.channels_count <= ARRAY_ENTRIES(params.irq_data), HAILO_DRIVER_FAIL,
+ "Invalid channels count returned from vdma_interrupts_wait");
-#if defined(__linux__)
- struct hailo_channel_interrupt_timestamp timestamps[MAX_IRQ_TIMESTAMPS_SIZE];
-#endif
+ IrqData irq{};
+ irq.channels_count = params.channels_count;
+ for (uint8_t i = 0; i < params.channels_count; i++) {
+ const auto engine_index = params.irq_data[i].engine_index;
+ const auto channel_index = params.irq_data[i].channel_index;
+ CHECK_AS_EXPECTED(engine_index < engines_count, HAILO_DRIVER_FAIL,
+ "Invalid engine index {} returned from vdma_interrupts_wait, max {}", engine_index, engines_count);
+ CHECK_AS_EXPECTED(channel_index < MAX_VDMA_CHANNELS_PER_ENGINE, HAILO_DRIVER_FAIL,
+ "Invalid channel_index {} returned from vdma_interrupts_wait", channel_index);
- hailo_vdma_channel_wait_params data {
- .engine_index = channel_id.engine_index,
- .channel_index = channel_id.channel_index,
- .channel_handle = channel_handle,
- .timeout_ms = static_cast<uint64_t>(timeout.count()),
- .timestamps_count = MAX_IRQ_TIMESTAMPS_SIZE,
-// In linux send address to local buffer because there isnt room on stack for array
-#if defined(__linux__)
- .timestamps = timestamps,
-#elif defined(__QNX__)
- .timestamps = {}
-#else
-#error "unsupported platform!"
-#endif // __linux__
- };
+ irq.channels_irq_data[i].channel_id.engine_index = engine_index;
+ irq.channels_irq_data[i].channel_id.channel_index = channel_index;
+ irq.channels_irq_data[i].is_active = params.irq_data[i].is_active;
+ irq.channels_irq_data[i].desc_num_processed = params.irq_data[i].host_num_processed;
+ irq.channels_irq_data[i].host_error = params.irq_data[i].host_error;
+ irq.channels_irq_data[i].device_error = params.irq_data[i].device_error;
+ }
+ return irq;
+}
+
+Expected<IrqData> HailoRTDriver::vdma_interrupts_wait(const ChannelsBitmap &channels_bitmap)
+{
+ CHECK_AS_EXPECTED(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
+ hailo_vdma_interrupts_wait_params params{};
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
int err = 0;
- auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CHANNEL_WAIT_INT, &data, err);
+ auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_WAIT, ¶ms, err);
if (HAILO_SUCCESS != status) {
- if (HAILO_TIMEOUT == status) {
- LOGGER__ERROR("Waiting for interrupt for channel {} timed-out (errno=ETIMEDOUT)", channel_id);
- return make_unexpected(status);
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Channel (index={}) was aborted!", channel_id);
- return make_unexpected(status);
- }
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("Channel (index={}) was deactivated!", channel_id);
- return make_unexpected(status);
- }
- LOGGER__ERROR("Failed to wait interrupt for channel {} with errno:{}", channel_id, err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ LOGGER__ERROR("Failed to wait vdma interrupts with errno:{}", err);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
+ return to_irq_data(params, static_cast<uint8_t>(m_dma_engines_count));
+}
+
+Expected<ChannelInterruptTimestampList> HailoRTDriver::vdma_interrupts_read_timestamps(vdma::ChannelId channel_id)
+{
+ hailo_vdma_interrupts_read_timestamp_params data{};
+ data.engine_index = channel_id.engine_index;
+ data.channel_index = channel_id.channel_index;
+
+ int err = 0;
+ auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS, &data, err);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
return create_interrupt_timestamp_list(data);
}
.read_bytes = 0
};
- CHECK(buffer_size <= sizeof(params.buffer), HAILO_PCIE_DRIVER_FAIL,
+ CHECK(buffer_size <= sizeof(params.buffer), HAILO_DRIVER_FAIL,
"Given buffer size {} is bigger than buffer size used to read logs {}", buffer_size, sizeof(params.buffer));
int err = 0;
auto status = hailo_ioctl(this->m_fd, HAILO_READ_LOG, ¶ms, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to read log with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
- CHECK(params.read_bytes <= sizeof(params.buffer), HAILO_PCIE_DRIVER_FAIL,
+ CHECK(params.read_bytes <= sizeof(params.buffer), HAILO_DRIVER_FAIL,
"Amount of bytes read from log {} is bigger than size of buffer {}", params.read_bytes, sizeof(params.buffer));
memcpy(buffer, params.buffer, params.read_bytes);
auto status = hailo_ioctl(this->m_fd, HAILO_RESET_NN_CORE, nullptr, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to reset nn core with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
Expected<HailoRTDriver::VdmaBufferHandle> HailoRTDriver::vdma_buffer_map(void *user_address, size_t required_size,
- DmaDirection data_direction, vdma_mapped_buffer_driver_identifier &driver_buff_handle)
+ DmaDirection data_direction, const vdma_mapped_buffer_driver_identifier &driver_buff_handle)
{
#if defined(__linux__)
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_BUFFER_MAP, &map_user_buffer_info, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to map user buffer with errno:{}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return VdmaBufferHandle(map_user_buffer_info.mapped_handle);
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_BUFFER_UNMAP, &unmap_user_buffer_info, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to unmap user buffer with errno:{}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
auto status = hailo_ioctl(this->m_fd, HAILO_DESC_LIST_CREATE, &create_desc_info, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to create descriptors list with errno:{}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::make_pair(create_desc_info.desc_handle, create_desc_info.dma_address);
auto status = hailo_ioctl(this->m_fd, HAILO_DESC_LIST_RELEASE, &desc_handle, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to release descriptors list with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
hailo_status HailoRTDriver::descriptors_list_bind_vdma_buffer(uintptr_t desc_handle, VdmaBufferHandle buffer_handle,
- uint16_t desc_page_size, uint8_t channel_index, size_t offset)
+ uint16_t desc_page_size, uint8_t channel_index, uint32_t starting_desc)
{
hailo_desc_list_bind_vdma_buffer_params config_info;
config_info.buffer_handle = buffer_handle;
config_info.desc_handle = desc_handle;
config_info.desc_page_size = desc_page_size;
config_info.channel_index = channel_index;
- config_info.offset = offset;
+ config_info.starting_desc = starting_desc;
int err = 0;
auto status = hailo_ioctl(this->m_fd, HAILO_DESC_LIST_BIND_VDMA_BUFFER, &config_info, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to bind vdma buffer to descriptors list with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-hailo_status HailoRTDriver::vdma_channel_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
-{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
-
- hailo_vdma_channel_abort_params params = {
- .engine_index = channel_id.engine_index,
- .channel_index = channel_id.channel_index,
- .channel_handle = channel_handle
- };
-
- int err = 0;
- auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CHANNEL_ABORT, ¶ms, err);
- if (HAILO_SUCCESS != status) {
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__DEBUG("Channel (index={}) was deactivated!", channel_id);
- return status;
- }
- else {
- LOGGER__ERROR("Failed to abort vdma channel (index={}) with errno: {}", channel_id, err);
- return HAILO_PCIE_DRIVER_FAIL;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HailoRTDriver::vdma_channel_clear_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
-{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
-
- hailo_vdma_channel_clear_abort_params params = {
- .engine_index = channel_id.engine_index,
- .channel_index = channel_id.channel_index,
- .channel_handle = channel_handle
- };
-
- int err = 0;
- auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CHANNEL_CLEAR_ABORT, ¶ms, err);
- if (HAILO_SUCCESS != status) {
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__DEBUG("Channel (index={}) was deactivated!", channel_id);
- return status;
- }
- else {
- LOGGER__ERROR("Failed to clear abort vdma channel (index={}) with errno: {}", channel_id, err);
- return HAILO_PCIE_DRIVER_FAIL;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
Expected<uintptr_t> HailoRTDriver::vdma_low_memory_buffer_alloc(size_t size)
{
CHECK_AS_EXPECTED(m_allocate_driver_buffer, HAILO_INVALID_OPERATION,
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC, &allocate_params, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to allocate buffer with errno: {}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::move(allocate_params.buffer_handle);
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE, (void*)buffer_handle, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to free allocated buffer with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC, ¶ms, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed allocate continuous buffer with errno:{}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::make_pair(params.buffer_handle, params.dma_address);
auto status = hailo_ioctl(this->m_fd, HAILO_VDMA_CONTINUOUS_BUFFER_FREE, (void*)buffer_handle, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to free continuous buffer with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
auto status = hailo_ioctl(this->m_fd, HAILO_MARK_AS_IN_USE, ¶ms, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("Failed to mark device as in use with errno: {}", err);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
if (params.in_use) {
return HAILO_DEVICE_IN_USE;
Expected<MmapBufferImpl> MmapBufferImpl::create_shared_memory(size_t length)
{
- void *address = mmap(nullptr, length, PROT_WRITE | PROT_READ,
+ void *address = mmap(nullptr, length, PROT_WRITE | PROT_READ,
MAP_ANONYMOUS | MAP_SHARED | MAP_UNINITIALIZED,
INVALID_FD, /*offset=*/ 0);
auto status = HailoRTDriver::hailo_ioctl(file, HAILO_NON_LINUX_DESC_LIST_MMAP, &map_vdma_list_params, err);
if (HAILO_SUCCESS != status) {
LOGGER__ERROR("HAILO_NON_LINUX_DESC_LIST_MMAP failed with errno:{}", err);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
void *address = mmap(nullptr, length, PROT_WRITE | PROT_READ | PROT_NOCACHE, MAP_SHARED | MAP_PHYS, NOFD, (off_t)map_vdma_list_params.user_address);
hailo_status MmapBufferImpl::unmap()
{
- if (INVALID_ADDR != m_address) {
- if (0 != munmap(m_address, m_length)) {
- LOGGER__ERROR("munmap of address {}, length: {} failed with errno {}", (void*)m_address, m_length, errno);
- return HAILO_INTERNAL_FAILURE;
- }
- m_address = INVALID_ADDR;
- m_length = 0;
+ if (!is_mapped()) {
+ return HAILO_SUCCESS;
}
+
+ if (0 != munmap(m_address, m_length)) {
+ LOGGER__ERROR("munmap of address {}, length: {} failed with errno {}", (void*)m_address, m_length, errno);
+ return HAILO_INTERNAL_FAILURE;
+ }
+
+ m_address = INVALID_ADDR;
+ m_length = 0;
return HAILO_SUCCESS;
}
}
else {
LOGGER__ERROR("Failed to open hailo pcie class ({}), errno {}", HAILO_PCIE_CLASS_PATH, errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
}
* This class implements our Events API over the neosmart pevents events. It also implement the Semaphore behavior and API
* Using the pevents events. For more information check out the implementation of pevents https://github.com/neosmart/pevents
**/
-#include "hailo/event.hpp"
+
#include "hailo/hailort.h"
+#include "hailo/event.hpp"
+
#include "common/utils.hpp"
-#include "event_internal.hpp"
+
+#include "utils/event_internal.hpp"
#include <poll.h>
#include <utility>
#include "pevents.h"
#undef WFMO
+
#define INVALID_EVENT_HANDLE (nullptr)
#define WAIT_OBJECT_0 (0)
}
else {
LOGGER__ERROR("Failed to open hailo pcie class ({}), errno {}", HAILO_CLASS_PATH, errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
}
const std::string device_id_path = std::string(HAILO_CLASS_PATH) + "/" +
device_name + "/" + HAILO_BOARD_LOCATION_FILENAME;
std::ifstream device_id_file(device_id_path);
- CHECK_AS_EXPECTED(device_id_file.good(), HAILO_PCIE_DRIVER_FAIL, "Failed open {}", device_id_path);
+ CHECK_AS_EXPECTED(device_id_file.good(), HAILO_DRIVER_FAIL, "Failed to open {}", device_id_path);
std::string device_id;
std::getline(device_id_file, device_id);
- CHECK_AS_EXPECTED(device_id_file.eof(), HAILO_PCIE_DRIVER_FAIL, "Failed read {}", device_id_path);
+ CHECK_AS_EXPECTED(device_id_file.eof(), HAILO_DRIVER_FAIL, "Failed to read {}", device_id_path);
HailoRTDriver::DeviceInfo device_info = {};
device_info.dev_path = std::string("/dev/") + device_name;
*
* TODO: doc
**/
-#include "hailo/event.hpp"
+
#include "hailo/hailort.h"
+#include "hailo/event.hpp"
+
#include "common/utils.hpp"
-#include "event_internal.hpp"
+
+#include "utils/event_internal.hpp"
#include <sys/eventfd.h>
#include <poll.h>
#include <utility>
+
namespace hailort
{
uint32_t number = 0;
if (!prop.Number(number)) {
LOGGER__ERROR("Failed parsing prop");
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return number;
}
* @file event.cpp
* @brief Event & Semaphore wrapper for Windows
**/
-#include "hailo/event.hpp"
+
#include "hailo/hailort.h"
+#include "hailo/event.hpp"
+
#include "common/utils.hpp"
-#include "event_internal.hpp"
+
+#include "utils/event_internal.hpp"
#include <utility>
#include <limits>
+
namespace hailort
{
{
static_assert(VDMA_CHANNELS_PER_ENGINE == MAX_VDMA_CHANNELS_PER_ENGINE, "Driver and libhailort parameters mismatch");
+static_assert(MAX_VDMA_ENGINES == MAX_VDMA_ENGINES_COUNT, "Driver and libhailort parameters mismatch");
static_assert(MIN_D2H_CHANNEL_INDEX == VDMA_DEST_CHANNELS_START, "Driver and libhailort parameters mismatch");
//TODO HRT-7309: merge with posix
return 0;
}
-const HailoRTDriver::VdmaChannelHandle HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE = INVALID_CHANNEL_HANDLE_VALUE;
+// TODO: validate wraparounds for buffer/mapping handles in the driver (HRT-9509)
const uintptr_t HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE = INVALID_DRIVER_HANDLE_VALUE;
+const size_t HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE = INVALID_DRIVER_HANDLE_VALUE;
const uint8_t HailoRTDriver::INVALID_VDMA_CHANNEL_INDEX = INVALID_VDMA_CHANNEL;
static hailo_status validate_driver_version(const hailo_driver_info &driver_info)
hailo_driver_info& driver_info = data.Buffer.DriverInfo;
if (0 > ioctl(m_fd, HAILO_QUERY_DRIVER_INFO, &data)) {
LOGGER__ERROR("Failed to query driver info, errno {}", errno);
- status = HAILO_PCIE_DRIVER_FAIL;
+ status = HAILO_DRIVER_FAIL;
return;
}
status = validate_driver_version(driver_info);
hailo_device_properties& device_properties = data.Buffer.DeviceProperties;
if (0 > ioctl(m_fd, HAILO_QUERY_DEVICE_PROPERTIES, &data)) {
LOGGER__ERROR("Failed query pcie device properties, errno {}", errno);
- status = HAILO_PCIE_DRIVER_FAIL;
+ status = HAILO_DRIVER_FAIL;
return;
}
break;
default:
LOGGER__ERROR("Invalid dma type returned from ioctl {}", device_properties.dma_type);
- status = HAILO_PCIE_DRIVER_FAIL;
+ status = HAILO_DRIVER_FAIL;
return;
}
auto rc = ioctl(this->m_fd, HAILO_READ_NOTIFICATION, &data);
if (0 > rc) {
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
std::vector<uint8_t> notification(notification_buffer.buffer_len);
tCompatibleHailoIoctlData data = {};
int res = ioctl(m_fd, HAILO_DISABLE_NOTIFICATION, &data);
- CHECK(0 <= res, HAILO_PCIE_DRIVER_FAIL, "HAILO_DISABLE_NOTIFICATION failed with errno: {}", errno);
+ CHECK(0 <= res, HAILO_DRIVER_FAIL, "HAILO_DISABLE_NOTIFICATION failed with errno: {}", errno);
return HAILO_SUCCESS;
}
if (0 > ioctl(m_fd, HAILO_MEMORY_TRANSFER, &data)) {
LOGGER__ERROR("HailoRTDriver::read_memory failed with errno:{}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
memcpy(buf, transfer.buffer, transfer.count);
if (0 > ioctl(this->m_fd, HAILO_MEMORY_TRANSFER, &data)) {
LOGGER__ERROR("HailoRTDriver::write_memory failed with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_READ_REGISTER, &data)) {
LOGGER__ERROR("HailoRTDriver::read_vdma_channel_register failed with errno: {}", errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::move(params.data);
if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_WRITE_REGISTER, &data)) {
LOGGER__ERROR("HailoRTDriver::write_vdma_channel_register failed with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-hailo_status HailoRTDriver::vdma_buffer_sync(VdmaBufferHandle handle, DmaDirection sync_direction, void *address,
- size_t buffer_size)
+hailo_status HailoRTDriver::vdma_buffer_sync(VdmaBufferHandle handle, DmaDirection sync_direction, size_t offset, size_t count)
{
CHECK(sync_direction != DmaDirection::BOTH, HAILO_INVALID_ARGUMENT, "Can't sync vdma data both host and device");
tCompatibleHailoIoctlData data = {};
hailo_vdma_buffer_sync_params& sync_info = data.Buffer.VdmaBufferSync;
sync_info.handle = handle;
sync_info.sync_type = (sync_direction == DmaDirection::H2D) ? HAILO_SYNC_FOR_DEVICE : HAILO_SYNC_FOR_HOST;
- sync_info.buffer_address = address;
- sync_info.buffer_size = buffer_size;
+ sync_info.offset = offset;
+ sync_info.count = count;
if (0 > ioctl(this->m_fd, HAILO_VDMA_BUFFER_SYNC, &data)) {
LOGGER__ERROR("HAILO_VDMA_BUFFER_SYNC failed with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-Expected<HailoRTDriver::VdmaChannelHandle> HailoRTDriver::vdma_channel_enable(vdma::ChannelId channel_id,
- DmaDirection data_direction, bool enable_timestamps_measure)
+
+hailo_status HailoRTDriver::vdma_interrupts_enable(const ChannelsBitmap &channels_bitmap, bool enable_timestamps_measure)
{
- CHECK_AS_EXPECTED(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- CHECK_AS_EXPECTED(data_direction != DmaDirection::BOTH, HAILO_INVALID_ARGUMENT, "Invalid direction given");
+ CHECK(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
tCompatibleHailoIoctlData data = {};
- hailo_vdma_channel_enable_params& params = data.Buffer.ChannelEnable;
- params.engine_index = channel_id.engine_index;
- params.channel_index = channel_id.channel_index;
- params.direction = direction_to_dma_data_direction(data_direction);
+ hailo_vdma_interrupts_enable_params& params = data.Buffer.VdmaInterruptsEnable;
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
params.enable_timestamps_measure = enable_timestamps_measure;
- if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_ENABLE, &data)) {
- LOGGER__ERROR("Failed to enable interrupt for channel {} with errno: {}", channel_id.channel_index, errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
- }
+ CHECK(ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_ENABLE, &data) >= 0, HAILO_DRIVER_FAIL,
+ "Failed to enable vdma interrupts with errno:{}", errno);
- return std::move(params.channel_handle);
+ return HAILO_SUCCESS;
}
-hailo_status HailoRTDriver::vdma_channel_disable(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
+hailo_status HailoRTDriver::vdma_interrupts_disable(const ChannelsBitmap &channels_bitmap)
{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
+ CHECK(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
tCompatibleHailoIoctlData data = {};
- hailo_vdma_channel_disable_params& params = data.Buffer.ChannelDisable;
- params.engine_index = channel_id.engine_index;
- params.channel_index = channel_id.channel_index;
- params.channel_handle = channel_handle;
+ hailo_vdma_interrupts_disable_params& params = data.Buffer.VdmaInterruptsDisable;
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
+
- if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_DISABLE, &data)) {
- LOGGER__ERROR("Failed to disable interrupt for channel {} with errno: {}", channel_id, errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ if (0 > ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_DISABLE, &data)) {
+ LOGGER__ERROR("Failed to disable vdma interrupts with errno: {}", errno);
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
-//TODO: unify
-static Expected<ChannelInterruptTimestampList> create_interrupt_timestamp_list(hailo_vdma_channel_wait_params &inter_data)
+// TODO: HRT-7309 - unify with posix
+static Expected<ChannelInterruptTimestampList> create_interrupt_timestamp_list(
+ hailo_vdma_interrupts_read_timestamp_params &inter_data)
{
- CHECK_AS_EXPECTED(inter_data.timestamps_count <= MAX_IRQ_TIMESTAMPS_SIZE, HAILO_PCIE_DRIVER_FAIL,
- "Invalid channel interrupt timestamps count returned {}", inter_data.timestamps_count);
- ChannelInterruptTimestampList timestamp_list;
+ CHECK_AS_EXPECTED(inter_data.timestamps_count <= MAX_IRQ_TIMESTAMPS_SIZE, HAILO_DRIVER_FAIL,
+ "Invalid channel interrupts timestamps count returned {}", inter_data.timestamps_count);
+ ChannelInterruptTimestampList timestamp_list{};
+
timestamp_list.count = inter_data.timestamps_count;
for (size_t i = 0; i < timestamp_list.count; i++) {
timestamp_list.timestamp_list[i].timestamp = std::chrono::nanoseconds(inter_data.timestamps[i].timestamp_ns);
timestamp_list.timestamp_list[i].desc_num_processed = inter_data.timestamps[i].desc_num_processed;
}
- return std::move(timestamp_list);
+ return timestamp_list;
}
-Expected<ChannelInterruptTimestampList> HailoRTDriver::wait_channel_interrupts(vdma::ChannelId channel_id,
- VdmaChannelHandle channel_handle, const std::chrono::milliseconds &timeout)
+static Expected<IrqData> to_irq_data(const hailo_vdma_interrupts_wait_params& params,
+ uint8_t engines_count)
{
- CHECK_AS_EXPECTED(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- CHECK_AS_EXPECTED(timeout.count() >= 0, HAILO_INVALID_ARGUMENT);
+ static_assert(ARRAY_ENTRIES(IrqData::channels_irq_data) == ARRAY_ENTRIES(params.irq_data), "Mismatch irq data size");
+ CHECK_AS_EXPECTED(params.channels_count <= ARRAY_ENTRIES(params.irq_data), HAILO_DRIVER_FAIL,
+ "Invalid channels count returned from vdma_interrupts_wait");
+
+ IrqData irq{};
+ irq.channels_count = params.channels_count;
+ for (uint8_t i = 0; i < params.channels_count; i++) {
+ const auto engine_index = params.irq_data[i].engine_index;
+ const auto channel_index = params.irq_data[i].channel_index;
+ CHECK_AS_EXPECTED(engine_index < engines_count, HAILO_DRIVER_FAIL,
+ "Invalid engine index {} returned from vdma_interrupts_wait, max {}", engine_index, engines_count);
+ CHECK_AS_EXPECTED(channel_index < MAX_VDMA_CHANNELS_PER_ENGINE, HAILO_DRIVER_FAIL,
+ "Invalid channel_index {} returned from vdma_interrupts_wait", channel_index);
+
+ irq.channels_irq_data[i].channel_id.engine_index = engine_index;
+ irq.channels_irq_data[i].channel_id.channel_index = channel_index;
+ irq.channels_irq_data[i].is_active = params.irq_data[i].is_active;
+ irq.channels_irq_data[i].desc_num_processed = params.irq_data[i].host_num_processed;
+ irq.channels_irq_data[i].host_error = params.irq_data[i].host_error;
+ irq.channels_irq_data[i].device_error = params.irq_data[i].device_error;
+ }
+ return irq;
+}
+
+Expected<IrqData> HailoRTDriver::vdma_interrupts_wait(const ChannelsBitmap &channels_bitmap)
+{
+ CHECK_AS_EXPECTED(is_valid_channels_bitmap(channels_bitmap), HAILO_INVALID_ARGUMENT, "Invalid channel bitmap given");
+ tCompatibleHailoIoctlData data = {};
+ hailo_vdma_interrupts_wait_params& params = data.Buffer.VdmaInterruptsWait;
+ std::copy(channels_bitmap.begin(), channels_bitmap.end(), params.channels_bitmap_per_engine);
+
+ if (0 > ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_WAIT, &data)) {
+ LOGGER__ERROR("Failed to wait interrupts for channels bitmap with errno: {}", errno);
+ return make_unexpected(HAILO_DRIVER_FAIL);
+ }
+
+ return to_irq_data(params, static_cast<uint8_t>(m_dma_engines_count));
+}
+Expected<ChannelInterruptTimestampList> HailoRTDriver::vdma_interrupts_read_timestamps(vdma::ChannelId channel_id)
+{
tCompatibleHailoIoctlData data = {};
- hailo_vdma_channel_wait_params& params = data.Buffer.ChannelWait;
+ hailo_vdma_interrupts_read_timestamp_params ¶ms = data.Buffer.VdmaInterruptsReadTimestamps;
params.engine_index = channel_id.engine_index;
params.channel_index = channel_id.channel_index;
- params.channel_handle = channel_handle;
- params.timeout_ms = static_cast<uint64_t>(timeout.count());
- params.timestamps_count = MAX_IRQ_TIMESTAMPS_SIZE;
-
- if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_WAIT_INT, &data)) {
- const auto ioctl_errno = errno;
- if (ERROR_SEM_TIMEOUT == ioctl_errno) {
- LOGGER__ERROR("Waiting for interrupt for channel {} timed-out", channel_id);
- return make_unexpected(HAILO_TIMEOUT);
- }
- if (ERROR_OPERATION_ABORTED == ioctl_errno) {
- LOGGER__INFO("Stream (index={}) was aborted!", channel_id);
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
- if (ERROR_NOT_READY == ioctl_errno) {
- LOGGER__INFO("Channel (index={}) was deactivated!", channel_id);
- return make_unexpected(HAILO_STREAM_NOT_ACTIVATED);
- }
- LOGGER__ERROR("Failed to wait interrupt for channel {} with errno: {}", channel_id, ioctl_errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+
+ if (0 > ioctl(this->m_fd, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS, &data)) {
+ LOGGER__ERROR("Failed to read channel interrupts timestamps errno: {}", errno);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return create_interrupt_timestamp_list(params);
}
Expected<size_t> HailoRTDriver::vdma_buffer_map(void *user_address, size_t required_size, DmaDirection data_direction,
- vdma_mapped_buffer_driver_identifier &driver_buff_handle)
+ const vdma_mapped_buffer_driver_identifier &driver_buff_handle)
{
tCompatibleHailoIoctlData data = {};
hailo_vdma_buffer_map_params& map_user_buffer_info = data.Buffer.VdmaBufferMap;
if (0 > ioctl(this->m_fd, HAILO_VDMA_BUFFER_MAP, &data)) {
LOGGER__ERROR("Failed to map user buffer with errno: {}", errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::move(map_user_buffer_info.mapped_handle);
unmap_user_buffer_info.mapped_handle = handle;
if (0 > ioctl(this->m_fd, HAILO_VDMA_BUFFER_UNMAP, &data)) {
LOGGER__ERROR("Failed to unmap user buffer with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
if (0 > ioctl(this->m_fd, HAILO_DESC_LIST_CREATE, &data)) {
LOGGER__ERROR("Failed to create descriptors list with errno: {}", errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
return std::move(std::make_pair(create_desc_info.desc_handle, create_desc_info.dma_address));
release_desc_info = desc_handle;
if (0 > ioctl(this->m_fd, HAILO_DESC_LIST_RELEASE, &data)) {
LOGGER__ERROR("Failed to release descriptors list with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
}
hailo_status HailoRTDriver::descriptors_list_bind_vdma_buffer(uintptr_t desc_handle, VdmaBufferHandle buffer_handle,
- uint16_t desc_page_size, uint8_t channel_index, size_t offset)
+ uint16_t desc_page_size, uint8_t channel_index, uint32_t starting_desc)
{
tCompatibleHailoIoctlData data = {};
hailo_desc_list_bind_vdma_buffer_params& config_info = data.Buffer.DescListBind;
config_info.desc_handle = desc_handle;
config_info.desc_page_size = desc_page_size;
config_info.channel_index = channel_index;
- config_info.offset = offset;
+ config_info.starting_desc = starting_desc;
if (0 > ioctl(this->m_fd, HAILO_DESC_LIST_BIND_VDMA_BUFFER, &data)) {
LOGGER__ERROR("Failed to bind vdma buffer to descriptors list with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HailoRTDriver::vdma_channel_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
-{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- tCompatibleHailoIoctlData data = {};
- hailo_vdma_channel_abort_params& params = data.Buffer.ChannelAbort;
- params.engine_index = channel_id.engine_index;
- params.channel_index = channel_id.channel_index;
- params.channel_handle = channel_handle;
- if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_ABORT, &data)) {
- LOGGER__ERROR("Failed to abort vdma channel (index={}) with errno: {}", channel_id, errno);
- return HAILO_PCIE_DRIVER_FAIL;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status HailoRTDriver::vdma_channel_clear_abort(vdma::ChannelId channel_id, VdmaChannelHandle channel_handle)
-{
- CHECK(is_valid_channel_id(channel_id), HAILO_INVALID_ARGUMENT, "Invalid channel id {} given", channel_id);
- tCompatibleHailoIoctlData data = {};
- hailo_vdma_channel_clear_abort_params& params = data.Buffer.ChannelClearAbort;
- params.engine_index = channel_id.engine_index;
- params.channel_index = channel_id.channel_index;
- params.channel_handle = channel_handle;
- if (0 > ioctl(this->m_fd, HAILO_VDMA_CHANNEL_CLEAR_ABORT, &data)) {
- LOGGER__ERROR("Failed to clear abort vdma channel (index={}) with errno: {}", channel_id, errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
return HAILO_SUCCESS;
if (0 > ioctl(this->m_fd, HAILO_READ_LOG, &data)) {
LOGGER__ERROR("Failed to read log with errno:{}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
- CHECK(params.read_bytes <= sizeof(params.buffer), HAILO_PCIE_DRIVER_FAIL,
+ CHECK(params.read_bytes <= sizeof(params.buffer), HAILO_DRIVER_FAIL,
"Amount of bytes read from log {} is bigger than size of buffer {}",
params.read_bytes, sizeof(params.buffer));
data.Buffer.DescListMmap.size = length;
if (0 > ioctl(file, HAILO_NON_LINUX_DESC_LIST_MMAP, &data)) {
LOGGER__ERROR("Failed to map physical memory with errno: {}", errno);
- return make_unexpected(HAILO_PCIE_DRIVER_FAIL);
+ return make_unexpected(HAILO_DRIVER_FAIL);
}
// this mapping will be deleted automatically with the physical allocation
return MmapBufferImpl(data.Buffer.DescListMmap.user_address, length, false);
tCompatibleHailoIoctlData data = {};
if (0 > ioctl(this->m_fd, HAILO_MARK_AS_IN_USE, &data)) {
LOGGER__ERROR("Failed to mark device as in use with errno: {}", errno);
- return HAILO_PCIE_DRIVER_FAIL;
+ return HAILO_DRIVER_FAIL;
}
if (data.Buffer.MarkAsInUse.in_use) {
return HAILO_DEVICE_IN_USE;
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pcie_device.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "pcie_device.hpp"
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-#include "hailo/device.hpp"
-#include "hailo/hef.hpp"
-#include "control.hpp"
-#include "common/compiler_extensions_compat.hpp"
-#include "os/hailort_driver.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-#include "context_switch/multi_context/vdma_config_manager.hpp"
-
-#include <new>
-#include <algorithm>
-
-namespace hailort
-{
-
-Expected<std::vector<hailo_pcie_device_info_t>> PcieDevice::scan()
-{
- auto scan_results = HailoRTDriver::scan_devices();
- CHECK_EXPECTED(scan_results);
-
- std::vector<hailo_pcie_device_info_t> out_results;
- out_results.reserve(scan_results->size());
- for (const auto &scan_result : scan_results.value()) {
- const bool DONT_LOG_ON_FAILURE = true;
- auto device_info = parse_pcie_device_info(scan_result.device_id, DONT_LOG_ON_FAILURE);
- if (device_info) {
- out_results.emplace_back(device_info.release());
- }
- }
-
- return out_results;
-}
-
-Expected<std::unique_ptr<PcieDevice>> PcieDevice::create()
-{
- // Take the first device
- auto scan_result = scan();
- CHECK_EXPECTED(scan_result, "Failed scanning pcie devices");
- CHECK_AS_EXPECTED(scan_result->size() == 1, HAILO_INVALID_OPERATION,
- "Expected only 1 PCIe device. Pass `hailo_pcie_device_info_t` to create a specific PCIe device");
- return create(scan_result->at(0));
-}
-
-Expected<std::unique_ptr<PcieDevice>> PcieDevice::create(const hailo_pcie_device_info_t &pcie_device_info)
-{
- auto device_info = find_device_info(pcie_device_info);
- CHECK_EXPECTED(device_info);
-
- auto pcie_device_info_str = pcie_device_info_to_string(pcie_device_info);
- CHECK_EXPECTED(pcie_device_info_str);
-
- auto driver = HailoRTDriver::create(device_info->dev_path);
- CHECK_EXPECTED(driver);
-
- hailo_status status = HAILO_UNINITIALIZED;
- auto device = std::unique_ptr<PcieDevice>(new (std::nothrow) PcieDevice(driver.release(), pcie_device_info, status,
- pcie_device_info_str.release()));
- CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating PcieDevice");
- return device;
-}
-
-// same format as in lspci - [<domain>].<bus>.<device>.<func>
-// domain (0 to ffff) bus (0 to ff), device (0 to 1f) and function (0 to 7).
-static const char *DEVICE_ID_STRING_FMT_SHORT = "%02x:%02x.%d";
-static constexpr int DEVICE_ID_ELEMENTS_COUNT_SHORT = 3;
-static constexpr int DEVICE_ID_STRING_LENGTH_SHORT = 7; // Length without null terminator
-
-static const char *DEVICE_ID_STRING_FMT_LONG = "%04x:%02x:%02x.%d";
-static constexpr int DEVICE_ID_ELEMENTS_COUNT_LONG = 4;
-static constexpr int DEVICE_ID_STRING_LENGTH_LONG = 12; // Length without null terminator
-
-static constexpr int DEVICE_ID_MAX_STRING_LENGTH = std::max(DEVICE_ID_STRING_LENGTH_SHORT, DEVICE_ID_STRING_LENGTH_LONG);
-
-Expected<hailo_pcie_device_info_t> PcieDevice::parse_pcie_device_info(const std::string &device_info_str,
- bool log_on_failure)
-{
- hailo_pcie_device_info_t device_info{};
- int scanf_res = sscanf(device_info_str.c_str(), DEVICE_ID_STRING_FMT_LONG,
- &device_info.domain, &device_info.bus, &device_info.device, &device_info.func);
- if (DEVICE_ID_ELEMENTS_COUNT_LONG != scanf_res) {
- // Domain not included, trying short
- device_info.domain = HAILO_PCIE_ANY_DOMAIN;
- scanf_res = sscanf(device_info_str.c_str(), DEVICE_ID_STRING_FMT_SHORT,
- &device_info.bus, &device_info.device, &device_info.func);
- if (DEVICE_ID_ELEMENTS_COUNT_SHORT != scanf_res) {
- if (log_on_failure) {
- LOGGER__ERROR("Invalid device info string (format is [<domain>].<bus>.<device>.<func>) {}", device_info_str);
- }
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
- }
-
- return device_info;
-}
-
-Expected<std::string> PcieDevice::pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info)
-{
- char device_string[DEVICE_ID_MAX_STRING_LENGTH + 1] = { 0 };
-
- if (HAILO_PCIE_ANY_DOMAIN != device_info.domain) {
- int res = snprintf(device_string, DEVICE_ID_STRING_LENGTH_LONG + 1, DEVICE_ID_STRING_FMT_LONG,
- device_info.domain, device_info.bus, device_info.device, device_info.func);
- // If the users give invalid device_info on release, they will get an invalid string.
- CHECK_AS_EXPECTED((DEVICE_ID_STRING_LENGTH_LONG) == res, HAILO_INVALID_ARGUMENT, "Invalid device info");
- }
- else {
- int res = snprintf(device_string, DEVICE_ID_STRING_LENGTH_SHORT + 1, DEVICE_ID_STRING_FMT_SHORT,
- device_info.bus, device_info.device, device_info.func);
- // If the users gives invalid device_info on release, they will get an invalid string.
- CHECK_AS_EXPECTED((DEVICE_ID_STRING_LENGTH_SHORT) == res, HAILO_INVALID_ARGUMENT, "Invalid device info");
- }
-
- return std::string(device_string);
-}
-
-PcieDevice::PcieDevice(HailoRTDriver &&driver, const hailo_pcie_device_info_t &device_info, hailo_status &status,
- const std::string &device_id) :
- VdmaDevice::VdmaDevice(std::move(driver), Device::Type::PCIE, device_id),
- m_device_info(device_info)
-{
- if (driver.is_fw_loaded()) {
- status = update_fw_state();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("update_fw_state() failed with status {}", status);
- return;
- }
- } else {
- LOGGER__WARNING("FW is not loaded to the device. Please load FW before using the device.");
- m_is_control_version_supported = false;
- }
-
- m_device_id = device_id;
-
- status = HAILO_SUCCESS;
-}
-
-void PcieDevice::set_is_control_version_supported(bool value)
-{
- m_is_control_version_supported = value;
-}
-
-Expected<hailo_device_architecture_t> PcieDevice::get_architecture() const
-{
- if (!m_driver.is_fw_loaded()) {
- LOGGER__WARNING("FW is not loaded to the device. Please load FW before using the device.");
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- return Expected<hailo_device_architecture_t>(m_device_architecture);
-}
-
-hailo_status PcieDevice::direct_write_memory(uint32_t address, const void *buffer, uint32_t size)
-{
- return m_driver.write_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
-}
-
-hailo_status PcieDevice::direct_read_memory(uint32_t address, void *buffer, uint32_t size)
-{
- return m_driver.read_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
-}
-
-const char *PcieDevice::get_dev_id() const
-{
- return m_device_id.c_str();
-}
-
-hailo_status PcieDevice::close_all_vdma_channels()
-{
- auto status = HAILO_UNINITIALIZED;
-
- // TODO: Add one icotl to stop all channels at once (HRT-6097)
- constexpr uint8_t PCIE_DEFAULT_ENGINE_INDEX = 0;
- for (uint8_t channel_index = 0; channel_index <= MAX_H2D_CHANNEL_INDEX; channel_index++) {
- const vdma::ChannelId channel_id = { PCIE_DEFAULT_ENGINE_INDEX, channel_index };
- auto host_registers = VdmaChannelRegs(m_driver, channel_id, HailoRTDriver::DmaDirection::H2D);
- status = host_registers.stop_channel();
- CHECK_SUCCESS(status);
-
- auto device_registers = VdmaChannelRegs(m_driver, channel_id, HailoRTDriver::DmaDirection::D2H);
- status = device_registers.stop_channel();
- CHECK_SUCCESS(status);
- }
-
- for (uint8_t channel_index = MIN_D2H_CHANNEL_INDEX; channel_index <= MAX_D2H_CHANNEL_INDEX; channel_index++) {
- const vdma::ChannelId channel_id = { PCIE_DEFAULT_ENGINE_INDEX, channel_index };
- auto host_registers = VdmaChannelRegs(m_driver, channel_id, HailoRTDriver::DmaDirection::D2H);
- status = host_registers.stop_channel();
- CHECK_SUCCESS(status);
-
- auto device_registers = VdmaChannelRegs(m_driver, channel_id, HailoRTDriver::DmaDirection::H2D);
- status = device_registers.stop_channel();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status PcieDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
- CONTROL_PROTOCOL__request_t request = {};
- size_t request_size = 0;
- uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
- size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
- CONTROL_PROTOCOL__response_header_t *header = NULL;
- CONTROL_PROTOCOL__payload_t *payload = NULL;
- bool is_expecting_response = true;
-
- CHECK(CONTROL_PROTOCOL__RESET_TYPE__CHIP != reset_type, HAILO_INVALID_OPERATION,
- "Chip reset is not supported for PCIe device.");
-
- if ((CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT == reset_type) || (CONTROL_PROTOCOL__RESET_TYPE__SOFT == reset_type)) {
- is_expecting_response = false; // TODO: Check boot source, set is_expecting_response = (boot_source != pcie)
- status = close_all_vdma_channels();
- CHECK_SUCCESS(status);
- }
-
- common_status = CONTROL_PROTOCOL__pack_reset_request(&request, &request_size, m_control_sequence, reset_type);
- status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
- CHECK_SUCCESS(status);
-
- LOGGER__DEBUG("Sending reset request");
- status = this->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
- // fw_interact should return failure if response is not expected
- // TODO: fix logic with respect to is_expecting_response, implement wait_for_wakeup();
- if (HAILO_SUCCESS == status) {
- status = Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header,
- &payload, &request);
- CHECK_SUCCESS(status);
- CHECK(is_expecting_response, HAILO_INTERNAL_FAILURE, "Recived valid response from FW for control who is not expecting one.");
- } else if ((HAILO_FW_CONTROL_FAILURE == status) && (!is_expecting_response)){
- status = HAILO_SUCCESS;
- } else {
- return status;
- }
-
- LOGGER__DEBUG("Board has been reset successfully");
- return HAILO_SUCCESS;
-}
-
-Expected<HailoRTDriver::DeviceInfo> PcieDevice::find_device_info(const hailo_pcie_device_info_t &pcie_device_info)
-{
- auto scan_results = HailoRTDriver::scan_devices();
- CHECK_EXPECTED(scan_results);
-
- // Find device index based on the information from "device_info"
- for (const auto &scan_result : scan_results.value()) {
- const bool DONT_LOG_ON_FAILURE = false;
- auto scanned_info = parse_pcie_device_info(scan_result.device_id, DONT_LOG_ON_FAILURE);
- if (!scanned_info) {
- continue;
- }
-
- const bool match = (pcie_device_info.bus == scanned_info->bus) &&
- (pcie_device_info.device == scanned_info->device) &&
- (pcie_device_info.func == scanned_info->func) &&
- ((HAILO_PCIE_ANY_DOMAIN == pcie_device_info.domain) || (pcie_device_info.domain == scanned_info->domain));
- if (match) {
- return HailoRTDriver::DeviceInfo(scan_result);
- }
- }
-
- LOGGER__ERROR("Requested device not found");
- return make_unexpected(HAILO_INVALID_ARGUMENT);
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pcie_device.hpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#ifndef HAILO_PCIE_DEVICE_H_
-#define HAILO_PCIE_DEVICE_H_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "vdma_channel.hpp"
-#include "vdma_device.hpp"
-
-namespace hailort
-{
-
-class PcieDevice : public VdmaDevice {
-public:
- static Expected<std::vector<hailo_pcie_device_info_t>> scan();
- static Expected<std::unique_ptr<PcieDevice>> create();
- static Expected<std::unique_ptr<PcieDevice>> create(const hailo_pcie_device_info_t &device_info);
- static Expected<hailo_pcie_device_info_t> parse_pcie_device_info(const std::string &device_info_str,
- bool log_on_failure);
- static Expected<std::string> pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info);
-
- virtual ~PcieDevice() = default;
-
- virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
- virtual hailo_status direct_write_memory(uint32_t address, const void *buffer, uint32_t size) override;
- virtual hailo_status direct_read_memory(uint32_t address, void *buffer, uint32_t size) override;
- virtual bool is_stream_interface_supported(const hailo_stream_interface_t& stream_interface) const override
- {
- switch (stream_interface) {
- case HAILO_STREAM_INTERFACE_ETH:
- case HAILO_STREAM_INTERFACE_CORE:
- return false;
- case HAILO_STREAM_INTERFACE_PCIE:
- case HAILO_STREAM_INTERFACE_MIPI:
- return true;
- default:
- LOGGER__ERROR("Invalid stream interface");
- return false;
- }
- }
-
- // TODO: used for tests
- void set_is_control_version_supported(bool value);
- virtual Expected<hailo_device_architecture_t> get_architecture() const override;
-
- const hailo_pcie_device_info_t get_device_info() const
- {
- return m_device_info;
- }
- virtual const char* get_dev_id() const override;
-
-private:
- PcieDevice(HailoRTDriver &&driver, const hailo_pcie_device_info_t &device_info, hailo_status &status,
- const std::string &device_id);
-
- hailo_status close_all_vdma_channels();
-
- static Expected<HailoRTDriver::DeviceInfo> find_device_info(const hailo_pcie_device_info_t &pcie_device_info);
-
- const hailo_pcie_device_info_t m_device_info;
- std::string m_device_id;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_PCIE_DEVICE_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pcie_stream.cpp
- **/
-
-#include "pcie_stream.hpp"
-
-namespace hailort
-{
-
-PcieInputStream::PcieInputStream(
- PcieDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status) :
- VdmaInputStream(device, std::move(channel), edge_layer, network_group_activated_event,
- batch_size, transfer_timeout, HAILO_STREAM_INTERFACE_PCIE, status)
- {}
-
-Expected<std::unique_ptr<PcieInputStream>> PcieInputStream::create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer,
- uint16_t batch_size, EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- PcieDevice *pcie_device = reinterpret_cast<PcieDevice*>(&device);
- std::unique_ptr<PcieInputStream> local_stream(new (std::nothrow) PcieInputStream(*pcie_device,
- std::move(channel), edge_layer, std::move(network_group_activated_event), batch_size,
- DEFAULT_TRANSFER_TIMEOUT, status));
- CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_stream;
-}
-
-Expected<std::unique_ptr<PcieOutputStream>> PcieOutputStream::create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- PcieDevice *pcie_device = reinterpret_cast<PcieDevice*>(&device);
-
- std::unique_ptr<PcieOutputStream> local_stream(new (std::nothrow) PcieOutputStream(*pcie_device,
- std::move(channel), edge_layer, std::move(network_group_activated_event),
- batch_size, DEFAULT_TRANSFER_TIMEOUT, status));
- CHECK((nullptr != local_stream), make_unexpected(HAILO_OUT_OF_HOST_MEMORY));
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_stream;
-}
-
-PcieOutputStream::PcieOutputStream(
- PcieDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status) :
- VdmaOutputStream(device, std::move(channel), edge_layer,
- network_group_activated_event, batch_size, transfer_timeout, status)
- {}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pcie_stream.hpp
- * @brief Stream object for PCIe device
- **/
-
-#ifndef _HAILO_PCIE_STREAM_H_
-#define _HAILO_PCIE_STREAM_H_
-
-#include "vdma_stream.hpp"
-#include "pcie_device.hpp"
-
-namespace hailort
-{
-
-class PcieInputStream : public VdmaInputStream {
-public:
- PcieInputStream(PcieInputStream &&other) = default;
- virtual ~PcieInputStream() = default;
-
- static Expected<std::unique_ptr<PcieInputStream>> create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_PCIE; }
-
-private:
- PcieInputStream(
- PcieDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status);
-};
-
-class PcieOutputStream : public VdmaOutputStream {
-public:
- PcieOutputStream(PcieOutputStream &&other) = default;
- virtual ~PcieOutputStream() = default;
-
- static Expected<std::unique_ptr<PcieOutputStream>> create(Device &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_PCIE; }
-
-private:
- explicit PcieOutputStream(
- PcieDevice &device,
- std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer,
- EventPtr network_group_activated_event,
- uint16_t batch_size,
- const std::chrono::milliseconds &transfer_timeout,
- hailo_status &status);
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_PCIE_STREAM_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pipeline.cpp
- * @brief Implemention of the pipeline
- **/
-
-#include "pipeline.hpp"
-#include "common/utils.hpp"
-#include "common/runtime_statistics_internal.hpp"
-
-namespace hailort
-{
-
-PipelineBuffer::Metadata::Metadata(PipelineTimePoint start_time) :
- m_start_time(start_time)
-{}
-
-PipelineBuffer::Metadata::Metadata() :
- Metadata(PipelineTimePoint{})
-{}
-
-PipelineTimePoint PipelineBuffer::Metadata::get_start_time() const
-{
- return m_start_time;
-}
-
-void PipelineBuffer::Metadata::set_start_time(PipelineTimePoint val)
-{
- m_start_time = val;
-}
-
-PipelineBuffer::PipelineBuffer() :
- PipelineBuffer(Type::DATA)
-{}
-
-PipelineBuffer::PipelineBuffer(Type type) :
- m_type(type),
- m_buffer(),
- m_should_release_buffer(false),
- m_pool(nullptr),
- m_view(),
- m_metadata()
-{}
-
-PipelineBuffer::PipelineBuffer(MemoryView view, bool should_measure) :
- m_type(Type::DATA),
- m_buffer(),
- m_should_release_buffer(false),
- m_pool(nullptr),
- m_view(view),
- m_metadata(Metadata(add_timestamp(should_measure)))
-{}
-
-PipelineBuffer::PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure) :
- m_type(Type::DATA),
- m_buffer(std::move(buffer)),
- m_should_release_buffer(true),
- m_pool(pool),
- m_view(m_buffer),
- m_metadata(Metadata(add_timestamp(should_measure)))
-{}
-
-PipelineBuffer::PipelineBuffer(PipelineBuffer &&other) :
- m_type(other.m_type),
- m_buffer(std::move(other.m_buffer)),
- m_should_release_buffer(std::exchange(other.m_should_release_buffer, false)),
- m_pool(std::move(other.m_pool)),
- m_view(std::move(other.m_view)),
- m_metadata(std::move(other.m_metadata))
-{}
-
-PipelineBuffer &PipelineBuffer::operator=(PipelineBuffer &&other)
-{
- m_type = other.m_type,
- m_buffer = std::move(other.m_buffer);
- m_should_release_buffer = std::exchange(other.m_should_release_buffer, false);
- m_pool = std::move(other.m_pool);
- m_view = std::move(other.m_view);
- m_metadata = std::move(other.m_metadata);
- return *this;
-}
-
-PipelineBuffer::~PipelineBuffer()
-{
- if (!m_should_release_buffer) {
- return;
- }
-
- hailo_status status = m_pool->release_buffer(std::move(m_buffer));
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Releasing buffer in buffer pool failed! status = {}", status);
- }
-}
-
-PipelineBuffer::operator bool() const
-{
- return !m_view.empty();
-}
-
-uint8_t* PipelineBuffer::data()
-{
- return m_view.data();
-}
-
-size_t PipelineBuffer::size() const
-{
- return m_view.size();
-}
-
-MemoryView PipelineBuffer::as_view()
-{
- return m_view;
-}
-
-PipelineBuffer::Type PipelineBuffer::get_type() const
-{
- return m_type;
-}
-
-PipelineBuffer::Metadata PipelineBuffer::get_metadata() const
-{
- return m_metadata;
-}
-
-void PipelineBuffer::set_metadata(Metadata &&val)
-{
- m_metadata = std::move(val);
-}
-
-PipelineTimePoint PipelineBuffer::add_timestamp(bool should_measure)
-{
- return should_measure ? std::chrono::steady_clock::now() : PipelineTimePoint{};
-}
-
-Expected<BufferPoolPtr> BufferPool::create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
- hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags)
-{
- AccumulatorPtr queue_size_accumulator = nullptr;
- if ((elem_flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
- queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
- CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
- const bool measure_vstream_latency = (vstream_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0;
-
- auto free_buffers = SpscQueue<Buffer>::create(buffer_count, shutdown_event, BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT);
- CHECK_EXPECTED(free_buffers);
-
- for (size_t i = 0; i < buffer_count; i++) {
- auto buffer = Buffer::create(buffer_size);
- CHECK_EXPECTED(buffer);
-
- hailo_status status = free_buffers->enqueue(buffer.release());
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- auto buffer_pool_ptr = make_shared_nothrow<BufferPool>(buffer_size, measure_vstream_latency,
- free_buffers.release(), std::move(queue_size_accumulator));
- CHECK_AS_EXPECTED(nullptr != buffer_pool_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return buffer_pool_ptr;
-}
-
-BufferPool::BufferPool(size_t buffer_size, bool measure_vstream_latency, SpscQueue<Buffer> &&free_buffers, AccumulatorPtr &&queue_size_accumulator) :
- m_buffer_size(buffer_size),
- m_measure_vstream_latency(measure_vstream_latency),
- m_free_buffers(std::move(free_buffers)),
- m_queue_size_accumulator(std::move(queue_size_accumulator))
-{}
-
-size_t BufferPool::buffer_size()
-{
- return m_buffer_size;
-}
-
-Expected<PipelineBuffer> BufferPool::acquire_buffer(std::chrono::milliseconds timeout)
-{
- if (nullptr != m_queue_size_accumulator) {
- m_queue_size_accumulator->add_data_point(static_cast<double>(m_free_buffers.size_approx()));
- }
- auto buffer = m_free_buffers.dequeue(timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- return make_unexpected(buffer.status());
- }
- else if (HAILO_TIMEOUT == buffer.status()) {
- LOGGER__WARNING(
- "Failed to acquire buffer because the buffer pool is empty. This could be caused by uneven reading and writing speeds, with a short user-defined timeout. (timeout={}ms)",
- timeout.count());
- return make_unexpected(buffer.status());
- }
- CHECK_EXPECTED(buffer);
- return PipelineBuffer(buffer.release(), shared_from_this(), m_measure_vstream_latency);
-}
-
-AccumulatorPtr BufferPool::get_queue_size_accumulator()
-{
- return m_queue_size_accumulator;
-}
-
-Expected<PipelineBuffer> BufferPool::get_available_buffer(PipelineBuffer &&optional, std::chrono::milliseconds timeout)
-{
- if (optional) {
- CHECK_AS_EXPECTED(optional.size() == buffer_size(), HAILO_INVALID_OPERATION,
- "Optional buffer size must be equal to pool buffer size. Optional buffer size = {}, buffer pool size = {}",
- optional.size(), buffer_size());
- return std::move(optional);
- }
-
- auto acquired_buffer = acquire_buffer(timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
- return make_unexpected(acquired_buffer.status());
- }
- CHECK_EXPECTED(acquired_buffer, "Failed to acquire buffer with status={}", acquired_buffer.status());
- return acquired_buffer.release();
-}
-
-hailo_status BufferPool::release_buffer(Buffer &&buffer)
-{
- std::unique_lock<std::mutex> lock(m_release_buffer_mutex);
- // This can be called after the shutdown event was signaled so we ignore it here
- return m_free_buffers.enqueue(std::move(buffer), true);
-}
-
-Expected<DurationCollector> DurationCollector::create(hailo_pipeline_elem_stats_flags_t flags,
- uint32_t num_frames_before_collection_start)
-{
- AccumulatorPtr latency_accumulator = nullptr;
- const auto measure_latency = should_measure_latency(flags);
- if (measure_latency) {
- latency_accumulator = make_shared_nothrow<FullAccumulator<double>>("latency");
- CHECK_AS_EXPECTED(nullptr != latency_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- AccumulatorPtr average_fps_accumulator = nullptr;
- const auto measure_average_fps = should_measure_average_fps(flags);
- if (measure_average_fps) {
- average_fps_accumulator = make_shared_nothrow<AverageFPSAccumulator<double>>("fps");
- CHECK_AS_EXPECTED(nullptr != average_fps_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- return DurationCollector(measure_latency, measure_average_fps, std::move(latency_accumulator),
- std::move(average_fps_accumulator), num_frames_before_collection_start);
-}
-
-DurationCollector::DurationCollector(bool measure_latency, bool measure_average_fps,
- AccumulatorPtr &&latency_accumulator, AccumulatorPtr &&average_fps_accumulator,
- uint32_t num_frames_before_collection_start) :
- m_measure_latency(measure_latency),
- m_measure_average_fps(measure_average_fps),
- m_measure(m_measure_latency || m_measure_average_fps),
- m_latency_accumulator(std::move(latency_accumulator)),
- m_average_fps_accumulator(std::move(average_fps_accumulator)),
- m_start(),
- m_count(0),
- m_num_frames_before_collection_start(num_frames_before_collection_start)
-{}
-
-void DurationCollector::start_measurement()
-{
- if (!m_measure) {
- return;
- }
-
- m_count++;
- if (m_count < m_num_frames_before_collection_start) {
- return;
- }
-
- m_start = std::chrono::steady_clock::now();
-}
-
-void DurationCollector::complete_measurement()
-{
- if ((!m_measure) || (m_count < m_num_frames_before_collection_start)) {
- return;
- }
-
- const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
- std::chrono::steady_clock::now() - m_start).count();
- if (m_measure_latency) {
- m_latency_accumulator->add_data_point(duration_sec);
- }
-
- if (m_measure_average_fps) {
- m_average_fps_accumulator->add_data_point(duration_sec);
- }
-}
-
-AccumulatorPtr DurationCollector::get_latency_accumulator()
-{
- return m_latency_accumulator;
-}
-
-AccumulatorPtr DurationCollector::get_average_fps_accumulator()
-{
- return m_average_fps_accumulator;
-}
-
-bool DurationCollector::should_measure_latency(hailo_pipeline_elem_stats_flags_t flags)
-{
- return (flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_LATENCY) != 0;
-}
-
-bool DurationCollector::should_measure_average_fps(hailo_pipeline_elem_stats_flags_t flags)
-{
- return (flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_FPS) != 0;
-}
-
-PipelineObject::PipelineObject(const std::string &name) : m_name(name)
-{}
-
-const std::string &PipelineObject::name() const
-{
- return m_name;
-}
-
-std::string PipelineObject::create_element_name(const std::string &element_name, const std::string &stream_name, uint8_t stream_index)
-{
- std::stringstream name;
- name << element_name << static_cast<uint32_t>(stream_index) << "_" << stream_name;
- return name.str();
-}
-
-hailo_status PipelinePad::link_pads(std::shared_ptr<PipelineElement> left, std::shared_ptr<PipelineElement> right,
- uint32_t left_source_index, uint32_t right_sink_index)
-{
- CHECK_ARG_NOT_NULL(left);
- CHECK_ARG_NOT_NULL(right);
- return link_pads(*left, *right, left_source_index, right_sink_index);
-}
-
-hailo_status PipelinePad::link_pads(PipelineElement &left, PipelineElement &right, uint32_t left_source_index,
- uint32_t right_sink_index)
-{
- CHECK(left_source_index < left.sources().size(), HAILO_INVALID_ARGUMENT,
- "Cannot link source pad #{} for PipelineElement '{}', it has only {} source pads.",
- left_source_index, left.name(), left.sources().size());
- CHECK(right_sink_index < right.sinks().size(), HAILO_INVALID_ARGUMENT,
- "Cannot link sink pad #{} for PipelineElement '{}', it has only {} sink pads.",
- right_sink_index, right.name(), right.sinks().size());
- auto &left_source_pad = left.sources()[left_source_index];
- auto &right_sink_pad = right.sinks()[right_sink_index];
-
- left_source_pad.set_next(&right_sink_pad);
- right_sink_pad.set_prev(&left_source_pad);
-
- return HAILO_SUCCESS;
-}
-
-// Initial value of the counter
-uint32_t PipelinePad::index = 0;
-std::string PipelinePad::create_pad_name(const std::string &element_name, Type pad_type)
-{
- std::stringstream string_stream;
- const auto pad_type_name = (pad_type == Type::SINK) ? "sink" : "source";
- string_stream << element_name << "(" << pad_type_name << index++ << ")";
- return string_stream.str();
-}
-
-PipelinePad::PipelinePad(PipelineElement &element, const std::string &element_name, Type pad_type) :
- PipelineObject(create_pad_name(element_name, pad_type)),
- m_element(element),
- m_next(nullptr),
- m_prev(nullptr),
- m_push_complete_callback(nullptr),
- m_pull_complete_callback(nullptr)
-{}
-
-hailo_status PipelinePad::activate()
-{
- return m_element.activate();
-}
-
-hailo_status PipelinePad::deactivate()
-{
- return m_element.deactivate();
-}
-
-hailo_status PipelinePad::post_deactivate()
-{
- return m_element.post_deactivate();
-}
-
-hailo_status PipelinePad::clear()
-{
- return m_element.clear();
-}
-
-hailo_status PipelinePad::flush()
-{
- return m_element.flush();
-}
-
-hailo_status PipelinePad::abort()
-{
- return m_element.abort();
-}
-
-hailo_status PipelinePad::wait_for_finish()
-{
- return m_element.wait_for_finish();
-}
-
-hailo_status PipelinePad::resume()
-{
- return m_element.resume();
-}
-
-hailo_status PipelinePad::run_push(PipelineBuffer &&buffer)
-{
- if (m_push_complete_callback) {
- auto metadata = buffer.get_metadata();
- const auto status = m_element.run_push(std::move(buffer));
- m_push_complete_callback(metadata);
- return status;
- }
-
- return m_element.run_push(std::move(buffer));
-}
-
-Expected<PipelineBuffer> PipelinePad::run_pull(PipelineBuffer &&optional)
-{
- auto result = m_element.run_pull(std::move(optional), *this);
- if (m_pull_complete_callback && result) {
- m_pull_complete_callback(result->get_metadata());
- }
-
- return result;
-}
-
-void PipelinePad::set_push_complete_callback(PushCompleteCallback push_complete_callback)
-{
- m_push_complete_callback = push_complete_callback;
-}
-
-void PipelinePad::set_pull_complete_callback(PullCompleteCallback pull_complete_callback)
-{
- m_pull_complete_callback = pull_complete_callback;
-}
-
-void PipelinePad::set_next(PipelinePad *next)
-{
- m_next = next;
-}
-
-void PipelinePad::set_prev(PipelinePad *prev)
-{
- m_prev = prev;
-}
-
-PipelinePad *PipelinePad::next()
-{
- return m_next;
-}
-
-PipelinePad *PipelinePad::prev()
-{
- return m_prev;
-}
-
-PipelineElement &PipelinePad::element()
-{
- return m_element;
-}
-
-const PipelinePad *PipelinePad::next() const
-{
- return m_next;
-}
-
-const PipelinePad *PipelinePad::prev() const
-{
- return m_prev;
-}
-
-const PipelineElement &PipelinePad::element() const
-{
- return m_element;
-}
-
-SourceElement::SourceElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
-{
- m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
-}
-
-PipelinePad &SourceElement::source()
-{
- return m_sources[0];
-}
-
-SinkElement::SinkElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
-{
- m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
-}
-
-PipelinePad &SinkElement::sink()
-{
- return m_sinks[0];
-}
-
-IntermediateElement::IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
-{
- m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
- m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
-}
-
-std::vector<PipelinePad*> IntermediateElement::execution_pads()
-{
- std::vector<PipelinePad*> result{&next_pad()};
- return result;
-}
-
-PipelineElement::PipelineElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineObject(name),
- m_duration_collector(std::move(duration_collector)),
- m_pipeline_status(std::move(pipeline_status)),
- m_sinks(),
- m_sources()
-{}
-
-AccumulatorPtr PipelineElement::get_fps_accumulator()
-{
- return m_duration_collector.get_average_fps_accumulator();
-}
-
-AccumulatorPtr PipelineElement::get_latency_accumulator()
-{
- return m_duration_collector.get_latency_accumulator();
-}
-
-std::vector<AccumulatorPtr> PipelineElement::get_queue_size_accumulators()
-{
- return std::vector<AccumulatorPtr>();
-}
-
-std::vector<PipelinePad> &PipelineElement::sinks()
-{
- return m_sinks;
-}
-
-std::vector<PipelinePad> &PipelineElement::sources()
-{
- return m_sources;
-}
-
-const std::vector<PipelinePad> &PipelineElement::sinks() const
-{
- return m_sinks;
-}
-
-const std::vector<PipelinePad> &PipelineElement::sources() const
-{
- return m_sources;
-}
-
-std::string PipelineElement::description() const
-{
- std::stringstream element_description;
- element_description << "(" << this->name() << ")";
- return element_description.str();
-}
-
-hailo_status PipelineElement::activate()
-{
- return execute_activate();
-}
-
-hailo_status PipelineElement::deactivate()
-{
- return execute_deactivate();
-}
-
-hailo_status PipelineElement::post_deactivate()
-{
- return execute_post_deactivate();
-}
-
-hailo_status PipelineElement::clear()
-{
- return execute_clear();
-}
-
-hailo_status PipelineElement::flush()
-{
- return execute_flush();
-}
-
-hailo_status PipelineElement::abort()
-{
- return execute_abort();
-}
-
-hailo_status PipelineElement::resume()
-{
- return execute_resume();
-}
-
-hailo_status PipelineElement::wait_for_finish()
-{
- return execute_wait_for_finish();
-}
-
-hailo_status PipelineElement::execute_activate()
-{
- return execute([&](auto *pad){ return pad->activate(); });
-}
-
-hailo_status PipelineElement::execute_deactivate()
-{
- return execute([&](auto *pad){ return pad->deactivate(); });
-}
-
-hailo_status PipelineElement::execute_post_deactivate()
-{
- return execute([&](auto *pad){ return pad->post_deactivate(); });
-}
-
-hailo_status PipelineElement::execute_clear()
-{
- return execute([&](auto *pad){ return pad->clear(); });
-}
-
-hailo_status PipelineElement::execute_flush()
-{
- return execute([&](auto *pad){ return pad->flush(); });
-}
-
-hailo_status PipelineElement::execute_abort()
-{
- return execute([&](auto *pad){ return pad->abort(); });
-}
-
-hailo_status PipelineElement::execute_resume()
-{
- return execute([&](auto *pad){ return pad->resume(); });
-}
-
-hailo_status PipelineElement::execute_wait_for_finish()
-{
- return execute([&](auto *pad){ return pad->wait_for_finish(); });
-}
-
-hailo_status PipelineElement::execute(std::function<hailo_status(PipelinePad*)> func)
-{
- for (auto pad : execution_pads()) {
- auto status = func(pad);
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
-}
-
-std::vector<PipelinePad*> SourceElement::execution_pads()
-{
- std::vector<PipelinePad*> result{&source()};
- return result;
-}
-
-std::vector<PipelinePad*> SinkElement::execution_pads()
-{
- std::vector<PipelinePad*> result{&sink()};
- return result;
-}
-
-FilterElement::FilterElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status))
-{}
-
-hailo_status FilterElement::run_push(PipelineBuffer &&buffer)
-{
- auto output = action(std::move(buffer), PipelineBuffer());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
- return output.status();
- }
- CHECK_EXPECTED_AS_STATUS(output);
-
- hailo_status status = next_pad().run_push(output.release());
- if (status == HAILO_SHUTDOWN_EVENT_SIGNALED) {
- LOGGER__INFO("run_push of {} was shutdown!", name());
- return status;
- }
- if (status == HAILO_STREAM_ABORTED_BY_USER) {
- LOGGER__INFO("run_push of {} was aborted!", name());
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<PipelineBuffer> FilterElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
-{
- auto buffer = next_pad().run_pull();
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- LOGGER__INFO("run_pull in FilterElement was shutdown!");
- return make_unexpected(buffer.status());
- }
- CHECK_EXPECTED(buffer);
- return action(buffer.release(), std::move(optional));
-}
-
-Expected<SpscQueue<PipelineBuffer>> BaseQueueElement::create_queue(size_t queue_size, EventPtr shutdown_event)
-{
- auto queue = SpscQueue<PipelineBuffer>::create(queue_size, shutdown_event);
- CHECK_EXPECTED(queue);
-
- return queue.release();
-}
-
-BaseQueueElement::BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- Event &&activation_event, Event &&deactivation_event) :
- IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status)),
- m_queue(std::move(queue)),
- m_shutdown_event(shutdown_event),
- m_timeout(timeout),
- m_is_thread_running(true),
- m_activation_event(std::move(activation_event)),
- m_deactivation_event(std::move(deactivation_event)),
- m_queue_size_accumulator(std::move(queue_size_accumulator)),
- m_is_run_in_thread_running(false)
-{}
-
-BaseQueueElement::~BaseQueueElement()
-{
- LOGGER__INFO("Queue element {} has {} frames in his Queue on destruction", name(), m_queue.size_approx());
-}
-
-void BaseQueueElement::start_thread()
-{
- m_thread = std::thread([this] () {
- while (m_is_thread_running.load()) {
- auto status = m_activation_event.wait(INIFINITE_TIMEOUT());
-
- if (!m_is_thread_running) {
- LOGGER__INFO("Thread in element {} is not running anymore, exiting..", this->name());
- break;
- }
- if (HAILO_SUCCESS == status) {
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_is_run_in_thread_running = true;
- }
- m_cv.notify_all();
-
- status = run_in_thread();
-
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_is_run_in_thread_running = false;
- }
- m_cv.notify_all();
- }
-
- if (HAILO_SUCCESS != status) {
- if (HAILO_SHUTDOWN_EVENT_SIGNALED != status) {
- // We do not want to log error for HAILO_STREAM_ABORTED_BY_USER
- if (HAILO_STREAM_ABORTED_BY_USER != status) {
- LOGGER__ERROR("Queue element {} run in thread function failed! status = {}", this->name(), status);
- }
-
- // Store the real error in pipeline_status
- m_pipeline_status->store(status);
-
- // Signal other threads to stop
- hailo_status shutdown_status = m_shutdown_event->signal();
- if (HAILO_SUCCESS != shutdown_status) {
- LOGGER__CRITICAL("Failed shutting down queue with status {}", shutdown_status);
- }
- }
- //Thread has done its execution. Mark to the thread to wait for activation again
- hailo_status event_status = m_activation_event.reset();
- if (HAILO_SUCCESS != event_status) {
- LOGGER__CRITICAL("Failed reset activation event of element {}, with status {}", this->name(), event_status);
- }
-
- // Mark to deactivation function that the thread is done
- event_status = m_deactivation_event.signal();
- if (HAILO_SUCCESS != event_status) {
- LOGGER__CRITICAL("Failed signaling deactivation event of element {}, with status {}", this->name(), event_status);
- }
- }
- }
- });
-}
-
-void BaseQueueElement::stop_thread()
-{
- m_shutdown_event->signal();
-
- // Mark thread as not running, then wake it in case it is waiting on m_activation_event
- m_is_thread_running = false;
- m_activation_event.signal();
-
- if (m_thread.joinable()) {
- m_thread.join();
- }
-}
-
-std::vector<AccumulatorPtr> BaseQueueElement::get_queue_size_accumulators()
-{
- if (nullptr == m_queue_size_accumulator) {
- return std::vector<AccumulatorPtr>();
- }
- return {m_queue_size_accumulator};
-}
-
-hailo_status BaseQueueElement::execute_activate()
-{
- hailo_status status = PipelineElement::execute_activate();
- CHECK_SUCCESS(status);
-
- status = m_activation_event.signal();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status BaseQueueElement::execute_post_deactivate()
-{
- hailo_status status = m_deactivation_event.wait(INIFINITE_TIMEOUT());
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to post_deactivate() in {} with status {}", name(), status);
- }
-
- status = m_deactivation_event.reset();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to reset of deactivation event in {} with status {}", name(), status);
- }
-
- return PipelineElement::execute_post_deactivate();
-}
-
-hailo_status BaseQueueElement::execute_clear()
-{
- auto status = PipelineElement::execute_clear();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to clear() in {} with status {}", name(), status);
- }
-
- auto queue_status = m_queue.clear();
- CHECK_SUCCESS(queue_status, "Failed to clear() queue in {} with status {}", name(), status);
-
- return status;
-}
-
-hailo_status BaseQueueElement::execute_wait_for_finish()
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- m_cv.wait(lock, [this] () {
- return !m_is_run_in_thread_running;
- });
- return HAILO_SUCCESS;
-}
-
-hailo_status PushQueueElement::execute_abort()
-{
- auto status = m_shutdown_event->reset();
- CHECK_SUCCESS(status);
- m_pipeline_status->store(HAILO_STREAM_ABORTED_BY_USER);
- status = PipelineElement::execute_abort();
- CHECK_SUCCESS(status);
- return m_activation_event.signal();
-}
-
-hailo_status BaseQueueElement::execute_resume()
-{
- auto status = m_shutdown_event->reset();
- CHECK_SUCCESS(status);
- m_pipeline_status->store(HAILO_SUCCESS);
- status = PipelineElement::execute_resume();
- CHECK_SUCCESS(status);
- return m_activation_event.signal();
-}
-
-hailo_status BaseQueueElement::set_timeout(std::chrono::milliseconds timeout)
-{
- m_timeout = timeout;
- return HAILO_SUCCESS;
-}
-
-std::string BaseQueueElement::description() const
-{
- std::stringstream element_description;
-
- element_description << "(" << this->name();
- if (HAILO_INFINITE != this->m_timeout.count()) {
- element_description << " | timeout: " << std::chrono::duration_cast<std::chrono::seconds>(this->m_timeout).count() << "s";
- }
- element_description << ")";
-
- return element_description.str();
-}
-
-hailo_status BaseQueueElement::pipeline_status()
-{
- auto status = m_pipeline_status->load();
-
- // We treat HAILO_STREAM_ABORTED_BY_USER as success because it is caused by user action (aborting streams)
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return HAILO_SUCCESS;
- }
- return status;
-}
-
-Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
- size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
- CHECK_EXPECTED(queue);
-
- auto activation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(activation_event);
-
- auto deactivation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(deactivation_event);
-
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
- CHECK_EXPECTED(duration_collector);
-
- AccumulatorPtr queue_size_accumulator = nullptr;
- if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
- queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
- CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- auto queue_ptr = make_shared_nothrow<PushQueueElement>(queue.release(), shutdown_event, name, timeout,
- duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
- activation_event.release(), deactivation_event.release());
- CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PushQueueElement {} failed!", name);
-
- LOGGER__INFO("Created {}", queue_ptr->name());
-
- return queue_ptr;
-}
-
-Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return PushQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
- vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
-}
-
-PushQueueElement::PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- Event &&activation_event, Event &&deactivation_event) :
- BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
- std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event))
-{
- start_thread();
-}
-
-PushQueueElement::~PushQueueElement()
-{
- stop_thread();
-}
-
-hailo_status PushQueueElement::run_push(PipelineBuffer &&buffer)
-{
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- if (nullptr != m_queue_size_accumulator) {
- m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
- }
- auto status = m_pipeline_status->load();
- if (status == HAILO_STREAM_ABORTED_BY_USER) {
- LOGGER__INFO("run_push of {} was aborted!", name());
- return status;
- }
- CHECK_SUCCESS(m_pipeline_status->load());
- status = m_queue.enqueue(std::move(buffer), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- auto queue_thread_status = pipeline_status();
- CHECK_SUCCESS(queue_thread_status,
- "Shutdown event was signaled in enqueue of queue element {} because thread has failed with status={}!", name(),
- queue_thread_status);
- LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- CHECK_SUCCESS(status);
- return HAILO_SUCCESS;
-}
-
-Expected<PipelineBuffer> PushQueueElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
-{
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-hailo_status PushQueueElement::execute_deactivate()
-{
- // Mark to the threads that deactivate() was called.
- hailo_status status = m_queue.enqueue(PipelineBuffer(PipelineBuffer::Type::DEACTIVATE));
- if (HAILO_SUCCESS != status) {
- // We want to deactivate source even if enqueue failed
- auto deactivation_status = PipelineElement::execute_deactivate();
- CHECK_SUCCESS(deactivation_status);
- if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_SHUTDOWN_EVENT_SIGNALED == status)) {
- LOGGER__INFO("enqueue() in element {} was aborted, got status = {}", name(), status);
- }
- else {
- LOGGER__ERROR("enqueue() in element {} failed, got status = {}", name(), status);
- return status;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-PipelinePad &PushQueueElement::next_pad()
-{
- // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
- return *m_sources[0].next();
-}
-
-hailo_status PushQueueElement::run_in_thread()
-{
- auto buffer = m_queue.dequeue(INIFINITE_TIMEOUT());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- CHECK_EXPECTED_AS_STATUS(buffer);
-
- // Return if deactivated
- if (PipelineBuffer::Type::DEACTIVATE == buffer->get_type()) {
- hailo_status status = m_shutdown_event->signal();
- CHECK_SUCCESS(status);
-
- status = next_pad().deactivate();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Deactivate of source in {} has failed with status {}", name(), status);
- }
-
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
-
- hailo_status status = next_pad().run_push(buffer.release());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("run_push of {} was aborted!", name());
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
- size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
- CHECK_EXPECTED(queue);
-
- auto activation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(activation_event);
-
- auto deactivation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(deactivation_event);
-
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
- CHECK_EXPECTED(duration_collector);
-
- AccumulatorPtr queue_size_accumulator = nullptr;
- if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
- queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
- CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- auto queue_ptr = make_shared_nothrow<PullQueueElement>(queue.release(), shutdown_event, name, timeout,
- duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
- activation_event.release(), deactivation_event.release());
- CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PullQueueElement {} failed!", name);
-
- LOGGER__INFO("Created {}", queue_ptr->name());
-
- return queue_ptr;
-}
-Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return PullQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
- vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
-}
-
-PullQueueElement::PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- Event &&activation_event, Event &&deactivation_event) :
- BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
- std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event))
-{
- start_thread();
-}
-
-PullQueueElement::~PullQueueElement()
-{
- stop_thread();
-}
-
-hailo_status PullQueueElement::run_push(PipelineBuffer &&/*buffer*/)
-{
- return HAILO_INVALID_OPERATION;
-}
-
-Expected<PipelineBuffer> PullQueueElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*sink*/)
-{
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- CHECK_AS_EXPECTED(!optional, HAILO_INVALID_ARGUMENT, "Optional buffer is not allowed in queue element!");
-
- if (nullptr != m_queue_size_accumulator) {
- m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
- }
- auto output = m_queue.dequeue(m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
- auto queue_thread_status = pipeline_status();
- CHECK_SUCCESS_AS_EXPECTED(queue_thread_status,
- "Shutdown event was signaled in dequeue of queue element {} because thread has failed with status={}!", name(),
- queue_thread_status);
- LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- CHECK_EXPECTED(output);
-
- return output;
-}
-
-hailo_status PullQueueElement::execute_deactivate()
-{
- hailo_status status = PipelineElement::execute_deactivate();
- auto shutdown_event_status = m_shutdown_event->signal();
- CHECK_SUCCESS(status);
- CHECK_SUCCESS(shutdown_event_status);
-
- return HAILO_SUCCESS;
-}
-
-PipelinePad &PullQueueElement::next_pad()
-{
- // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
- return *m_sinks[0].prev();
-}
-
-hailo_status PullQueueElement::run_in_thread()
-{
- auto buffer = next_pad().run_pull();
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- LOGGER__INFO("Shutdown event was signaled in run_pull of queue element {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
- LOGGER__INFO("run_pull of queue element {} was aborted!", name());
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- if (HAILO_NETWORK_GROUP_NOT_ACTIVATED == buffer.status()) {
- LOGGER__INFO("run_pull of queue element {} was called before network_group is activated!", name());
- return HAILO_NETWORK_GROUP_NOT_ACTIVATED;
- }
- CHECK_EXPECTED_AS_STATUS(buffer);
-
- hailo_status status = m_queue.enqueue(buffer.release(), INIFINITE_TIMEOUT());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
- hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto pending_buffer_queue = BaseQueueElement::create_queue(1, shutdown_event);
- CHECK_EXPECTED(pending_buffer_queue);
-
- auto full_buffer_queue = BaseQueueElement::create_queue(1, shutdown_event);
- CHECK_EXPECTED(full_buffer_queue);
-
- auto activation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(activation_event);
-
- auto deactivation_event = Event::create(Event::State::not_signalled);
- CHECK_EXPECTED(deactivation_event);
-
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
- CHECK_EXPECTED(duration_collector);
-
- AccumulatorPtr queue_size_accumulator = nullptr;
- if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
- queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
- CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- auto queue_ptr = make_shared_nothrow<UserBufferQueueElement>(pending_buffer_queue.release(),
- full_buffer_queue.release(), shutdown_event, name, timeout, duration_collector.release(),
- std::move(queue_size_accumulator), std::move(pipeline_status), activation_event.release(),
- deactivation_event.release());
- CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating UserBufferQueueElement {} failed!", name);
-
- LOGGER__INFO("Created {}", queue_ptr->name());
-
- return queue_ptr;
-}
-
-Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return UserBufferQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
- vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
-}
-
-UserBufferQueueElement::UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue,
- EventPtr shutdown_event, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- Event &&activation_event, Event &&deactivation_event) :
- PullQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector),
- std::move(queue_size_accumulator), std::move(pipeline_status), std::move(activation_event),
- std::move(deactivation_event)),
- m_full_buffer_queue(std::move(full_buffer_queue))
-{}
-
-Expected<PipelineBuffer> UserBufferQueueElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
-{
- // TODO: Support fps/latency collection for queue elems (HRT-7711)
- CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be valid in {}!", name());
-
- hailo_status status = m_queue.enqueue(std::move(optional), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- LOGGER__INFO("Shutdown event was signaled in enqueue of queue element {}!", name());
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- if (nullptr != m_queue_size_accumulator) {
- m_queue_size_accumulator->add_data_point(static_cast<double>(m_full_buffer_queue.size_approx()));
- }
- auto output = m_full_buffer_queue.dequeue(m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
- LOGGER__INFO("Shutdown event was signaled in dequeue of queue element {}!", name());
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != output.status(), HAILO_TIMEOUT, "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
- CHECK_EXPECTED(output);
-
- CHECK_AS_EXPECTED(output->data() == optional.data(), HAILO_INTERNAL_FAILURE, "The buffer received in {} was not the same as the user buffer!", name());
- return output;
-}
-
-hailo_status UserBufferQueueElement::execute_clear()
-{
- auto status = PipelineElement::execute_clear();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to clear() in {} with status {}", name(), status);
- }
-
- auto queue_clear_status = m_full_buffer_queue.clear();
- if (HAILO_SUCCESS != queue_clear_status) {
- LOGGER__ERROR("Failed to clear() in {} with status {}", name(), queue_clear_status);
- status = queue_clear_status;
- }
-
- queue_clear_status = m_queue.clear();
- if (HAILO_SUCCESS != queue_clear_status) {
- LOGGER__ERROR("Failed to clear() in {} with status {}", name(), queue_clear_status);
- status = queue_clear_status;
- }
-
- return status;
-}
-
-hailo_status UserBufferQueueElement::run_in_thread()
-{
- auto optional = m_queue.dequeue(INIFINITE_TIMEOUT());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == optional.status()) {
- LOGGER__INFO("Shutdown event was signaled in dequeue of {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- CHECK_EXPECTED_AS_STATUS(optional);
-
- auto buffer = next_pad().run_pull(optional.release());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- LOGGER__INFO("Shutdown event was signaled in run_pull of {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
- LOGGER__INFO("run_pull of {} was aborted!", name());
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- CHECK_EXPECTED_AS_STATUS(buffer);
-
- hailo_status status = m_full_buffer_queue.enqueue(buffer.release(), INIFINITE_TIMEOUT());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- LOGGER__INFO("Shutdown event was signaled in enqueue of {}!", name());
- return HAILO_SHUTDOWN_EVENT_SIGNALED;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-BaseMuxElement::BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
- m_timeout(timeout)
-{
- m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
- m_sinks.reserve(sink_count);
- for (uint32_t i = 0; i < sink_count; ++i) {
- m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
- }
-}
-
-std::vector<PipelinePad*> BaseMuxElement::execution_pads()
-{
- std::vector<PipelinePad*> result;
- result.reserve(m_sinks.size());
- for (auto& pad : m_sinks) {
- result.push_back(pad.prev());
- }
- return result;
-}
-
-hailo_status BaseMuxElement::run_push(PipelineBuffer &&/*buffer*/)
-{
- return HAILO_NOT_IMPLEMENTED;
-}
-
-Expected<PipelineBuffer> BaseMuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
-{
- std::vector<PipelineBuffer> inputs;
- inputs.reserve(m_sinks.size());
- for (auto &sink : m_sinks) {
- auto buffer = sink.prev()->run_pull();
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- return make_unexpected(buffer.status());
- }
- CHECK_EXPECTED(buffer);
-
- inputs.push_back(buffer.release());
- }
-
- auto output = action(std::move(inputs), std::move(optional));
- CHECK_EXPECTED(output);
-
- return output;
-}
-
-BaseDemuxElement::BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
- m_timeout(timeout),
- m_is_activated(false),
- m_was_stream_aborted(false),
- m_index_of_source(),
- m_was_source_called(source_count, false),
- m_buffers_for_action()
-{
- m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
- m_sources.reserve(source_count);
- for (uint32_t i = 0; i < source_count; i++) {
- m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
- m_index_of_source[&m_sources[i]] = i;
- }
-}
-
-hailo_status BaseDemuxElement::run_push(PipelineBuffer &&/*buffer*/)
-{
- return HAILO_NOT_IMPLEMENTED;
-}
-
-Expected<PipelineBuffer> BaseDemuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
-{
- CHECK_AS_EXPECTED(!optional, HAILO_INVALID_ARGUMENT, "Optional buffer is not allowed in demux element!");
-
- // TODO: should we lock here? or only right before wait_for?
- std::unique_lock<std::mutex> lock(m_mutex);
- if (!m_is_activated) {
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
-
- m_was_source_called[m_index_of_source[&source]] = true;
- if (were_all_sinks_called()) {
- auto input = next_pad().run_pull();
- if (HAILO_STREAM_ABORTED_BY_USER == input.status()) {
- LOGGER__INFO("run_pull of demux element was aborted!");
- m_was_stream_aborted = true;
- lock.unlock();
- m_cv.notify_all();
- return make_unexpected(input.status());
- }
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == input.status()) {
- return make_unexpected(input.status());
- }
- CHECK_EXPECTED(input);
-
- auto outputs = action(input.release());
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == outputs.status()) {
- return make_unexpected(outputs.status());
- }
- CHECK_EXPECTED(outputs);
-
- m_buffers_for_action = outputs.release();
-
- for (uint32_t i = 0; i < m_was_source_called.size(); i++) {
- m_was_source_called[i] = false;
- }
-
- // Manual unlocking is done before notifying, to avoid waking up the waiting thread only to block again
- lock.unlock();
- m_cv.notify_all();
- } else {
- auto cv_status = m_cv.wait_for(lock, m_timeout);
- CHECK_AS_EXPECTED(std::cv_status::timeout != cv_status, HAILO_TIMEOUT, "Waiting for other threads in demux {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
-
- if (m_was_stream_aborted) {
- lock.unlock();
- m_cv.notify_all();
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
-
- // We check if the element is not activated in case notify_all() was called from deactivate()
- if (!m_is_activated) {
- lock.unlock();
- m_cv.notify_all();
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- }
-
- assert(m_index_of_source[&source] < m_buffers_for_action.size());
- return std::move(m_buffers_for_action[m_index_of_source[&source]]);
-}
-
-bool BaseDemuxElement::were_all_sinks_called()
-{
- return std::all_of(m_was_source_called.begin(), m_was_source_called.end(), [](bool v) { return v; });
-}
-
-hailo_status BaseDemuxElement::execute_activate()
-{
- if (m_is_activated) {
- return HAILO_SUCCESS;
- }
- m_is_activated = true;// TODO Should this always be true, no matter the status of source().activate()?
- m_was_stream_aborted = false;
- return PipelineElement::execute_activate();
-}
-
-hailo_status BaseDemuxElement::execute_deactivate()
-{
- if (!m_is_activated) {
- return HAILO_SUCCESS;
- }
- m_is_activated = false;
-
- // deactivate should be called before mutex acquire and notify_all because it is possible that all queues are waiting on
- // the run_pull of the source (HwRead) and the mutex is already acquired so this would prevent a timeout error
- hailo_status status = PipelineElement::execute_deactivate();
-
- {
- // There is a case where the other thread is halted (via context switch) before the wait_for() function,
- // then we call notify_all() here, and then the wait_for() is called - resulting in a timeout.
- // notify_all() only works on threads which are already waiting, so that's why we acquire the lock here.
- std::unique_lock<std::mutex> lock(m_mutex);
- }
- m_cv.notify_all();
-
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status BaseDemuxElement::execute_post_deactivate()
-{
- for (uint32_t i = 0; i < m_was_source_called.size(); i++) {
- m_was_source_called[i] = false;
- }
- return PipelineElement::execute_post_deactivate();
-}
-
-hailo_status BaseDemuxElement::execute_abort()
-{
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_was_stream_aborted = true;
- }
- m_cv.notify_all();
- return PipelineElement::execute_abort();
-}
-
-PipelinePad &BaseDemuxElement::next_pad()
-{
- // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
- return *m_sinks[0].prev();
-}
-
-hailo_status BaseDemuxElement::set_timeout(std::chrono::milliseconds timeout)
-{
- m_timeout = timeout;
- return HAILO_SUCCESS;
-}
-
-std::vector<PipelinePad*> BaseDemuxElement::execution_pads()
-{
- std::vector<PipelinePad*> result{&next_pad()};
- return result;
-}
-
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pipeline.hpp
- * @brief Hailo Infer Pipeline
- **/
-
-#ifndef _HAILO_PIPELINE_HPP_
-#define _HAILO_PIPELINE_HPP_
-
-#include "hailo/buffer.hpp"
-#include "hailo/runtime_statistics.hpp"
-#include "thread_safe_queue.hpp"
-
-#include <memory>
-#include <thread>
-#include <sstream>
-#include <functional>
-
-namespace hailort
-{
-
-using PipelineTimePoint = std::chrono::steady_clock::time_point;
-#define BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT (std::chrono::milliseconds(10000))
-#define DEFAULT_NUM_FRAMES_BEFORE_COLLECTION_START (100)
-
-class BufferPool;
-using BufferPoolPtr = std::shared_ptr<BufferPool>;
-
-class PipelineBuffer final
-{
-public:
- class Metadata final
- {
- public:
- explicit Metadata(PipelineTimePoint start_time);
- // Creates an empty metadata object
- Metadata();
-
- ~Metadata() = default;
- Metadata(const Metadata &) = default;
- Metadata &operator=(const Metadata &) = delete;
- Metadata(Metadata &&other) = default;
- Metadata &operator=(Metadata &&other) = default;
-
- PipelineTimePoint get_start_time() const;
- void set_start_time(PipelineTimePoint val);
-
- private:
- PipelineTimePoint m_start_time;
- };
-
- enum class Type {
- DATA = 0,
- FLUSH,
- DEACTIVATE
- };
-
- // Creates an empty PipelineBuffer (with no buffer/memory view)
- PipelineBuffer();
- PipelineBuffer(Type type);
- PipelineBuffer(MemoryView view, bool should_measure = false);
- PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure = false);
- ~PipelineBuffer();
-
- PipelineBuffer(const PipelineBuffer &) = delete;
- PipelineBuffer &operator=(const PipelineBuffer &) = delete;
- PipelineBuffer(PipelineBuffer &&other);
- PipelineBuffer &operator=(PipelineBuffer &&other);
- explicit operator bool() const;
-
- uint8_t* data();
- size_t size() const;
- MemoryView as_view();
- Type get_type() const;
- Metadata get_metadata() const;
- void set_metadata(Metadata &&val);
-
-private:
- Type m_type;
- Buffer m_buffer;
- bool m_should_release_buffer;
- BufferPoolPtr m_pool;
- MemoryView m_view;
- Metadata m_metadata;
-
- static PipelineTimePoint add_timestamp(bool should_measure);
-};
-
-// The buffer pool has to be created as a shared pointer (via the create function) because we use shared_from_this(),
-// which is only allowed if there is already a shared pointer pointing to "this"!
-class BufferPool : public std::enable_shared_from_this<BufferPool>
-{
-public:
- static Expected<BufferPoolPtr> create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
- hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags);
- BufferPool(size_t buffer_size, bool measure_vstream_latency, SpscQueue<Buffer> &&free_buffers, AccumulatorPtr &&queue_size_accumulator);
- virtual ~BufferPool() = default;
-
- size_t buffer_size();
- Expected<PipelineBuffer> acquire_buffer(std::chrono::milliseconds timeout);
- AccumulatorPtr get_queue_size_accumulator();
- Expected<PipelineBuffer> get_available_buffer(PipelineBuffer &&optional, std::chrono::milliseconds timeout);
-
-private:
- hailo_status release_buffer(Buffer &&buffer);
-
- const size_t m_buffer_size;
- const bool m_measure_vstream_latency;
- SpscQueue<Buffer> m_free_buffers;
- AccumulatorPtr m_queue_size_accumulator;
- std::mutex m_release_buffer_mutex;
-
- friend class PipelineBuffer;
-};
-
-class DurationCollector final
-{
-public:
- // TODO: HRT-4258
- // Note: We start measuring the FPS/latency after num_frames_before_collection_start calls to start_measurement +
- // complete_measurement. This is to allow the vstream pipeline to stabilize. Thus we ignore invalid
- // measurements that are due to buffering that occours when the pipeline starts.
- static Expected<DurationCollector> create(hailo_pipeline_elem_stats_flags_t flags,
- uint32_t num_frames_before_collection_start = DEFAULT_NUM_FRAMES_BEFORE_COLLECTION_START);
- DurationCollector(const DurationCollector &) = delete;
- DurationCollector(DurationCollector &&other) = default;
- DurationCollector &operator=(const DurationCollector &) = delete;
- DurationCollector &operator=(DurationCollector &&other) = delete;
- ~DurationCollector() = default;
-
- void start_measurement();
- void complete_measurement();
-
- // latency_accumulator will measure latency in seconds
- AccumulatorPtr get_latency_accumulator();
- // average_fps_accumulator will measure fps in seconds^-1
- AccumulatorPtr get_average_fps_accumulator();
-
-private:
- DurationCollector(bool measure_latency, bool measure_average_fps,
- AccumulatorPtr &&latency_accumulator, AccumulatorPtr &&average_fps_accumulator,
- uint32_t num_frames_before_collection_start);
- static bool should_measure_latency(hailo_pipeline_elem_stats_flags_t flags);
- static bool should_measure_average_fps(hailo_pipeline_elem_stats_flags_t flags);
-
- const bool m_measure_latency;
- const bool m_measure_average_fps;
- const bool m_measure;
- AccumulatorPtr m_latency_accumulator;
- AccumulatorPtr m_average_fps_accumulator;
- PipelineTimePoint m_start;
- size_t m_count;
- const size_t m_num_frames_before_collection_start;
-};
-
-class PipelineObject
-{
-public:
- PipelineObject(const std::string &name);
- virtual ~PipelineObject() = default;
- PipelineObject(PipelineObject &&) noexcept = default;
- PipelineObject& operator=(PipelineObject &&) noexcept = default;
-
- const std::string &name() const;
-
- static std::string create_element_name(const std::string &element_name, const std::string &stream_name, uint8_t stream_index);
-
-private:
- std::string m_name;
-};
-
-class PipelineElement;
-using PushCompleteCallback = std::function<void(const PipelineBuffer::Metadata&)>;
-using PullCompleteCallback = std::function<void(const PipelineBuffer::Metadata&)>;
-
-struct NetFlowPad {
- std::string name;
- hailo_format_t format;
- hailo_quant_info_t quant_info;
- uint32_t number_of_classes = 0; // temporarly here, should be only if the previous op is NMS
-};
-
-struct NetFlowElement {
- std::vector<NetFlowPad> input_pads;
- std::vector<NetFlowPad> output_pads;
-
- enum class Type
- {
- None = 0,
- CoreOp = 1,
- YoloNmsOp = 2
- };
-
- Type type;
- std::string name;
- std::set<std::string> input_streams;
-
- virtual ~NetFlowElement() = default;
-};
-
-struct YoloBboxDecoder {
- std::vector<uint32_t> h;
- std::vector<uint32_t> w;
- uint32_t stride = 0;
- // uint32_t pad_index;
- std::string stream_name;
-};
-
-struct NetFlowNmsElement : NetFlowElement {
- float32_t nms_score_th = 0;
- float32_t nms_iou_th = 0;
- uint32_t max_proposals_per_class = 0;
- uint32_t classes = 0;
- bool background_removal = false;
- uint32_t background_removal_index = 0;
-};
-
-struct NetFlowYoloNmsElement final : NetFlowNmsElement {
- std::vector<YoloBboxDecoder> bbox_decoders;
- float32_t image_height = 0;
- float32_t image_width = 0;
- uint32_t input_division_factor = 0;
-};
-
-class PipelinePad final : public PipelineObject
-{
-public:
- enum class Type
- {
- SOURCE,
- SINK
- };
-
- // Link left's source pad (left->sources()[left_source_index]) with right's sink pad (right->right()[right_sink_index])
- static hailo_status link_pads(std::shared_ptr<PipelineElement> left, std::shared_ptr<PipelineElement> right,
- uint32_t left_source_index = 0, uint32_t right_sink_index = 0);
- // Link left's source pad (left.sources()[left_source_index]) with right's sink pad (right.right()[right_sink_index])
- static hailo_status link_pads(PipelineElement &left, PipelineElement &right, uint32_t left_source_index = 0,
- uint32_t right_sink_index = 0);
- static std::string create_pad_name(const std::string &element_name, Type pad_type);
-
- PipelinePad(PipelineElement &element, const std::string &element_name, Type pad_type);
- PipelinePad(const PipelinePad &) = delete;
- PipelinePad(PipelinePad &&other) = default;
- PipelinePad &operator=(const PipelinePad &) = delete;
- PipelinePad &operator=(PipelinePad &&other) = delete;
- ~PipelinePad() = default;
-
- hailo_status activate();
- hailo_status deactivate();
- hailo_status post_deactivate();
- hailo_status clear();
- hailo_status flush();
- hailo_status abort();
- hailo_status wait_for_finish();
- hailo_status resume();
- virtual hailo_status run_push(PipelineBuffer &&buffer);
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional = PipelineBuffer());
- void set_push_complete_callback(PushCompleteCallback push_complete_callback);
- void set_pull_complete_callback(PullCompleteCallback pull_complete_callback);
- void set_next(PipelinePad *next);
- void set_prev(PipelinePad *prev);
- PipelinePad *next();
- PipelinePad *prev();
- PipelineElement &element();
- const PipelinePad *next() const;
- const PipelinePad *prev() const;
- const PipelineElement &element() const;
-
-protected:
- PipelineElement &m_element;
- PipelinePad *m_next;
- PipelinePad *m_prev;
- PushCompleteCallback m_push_complete_callback;
- PullCompleteCallback m_pull_complete_callback;
-
-private:
- // Automatic naming isn't thread safe
- static uint32_t index;
-};
-
-// Note: PipelinePads accept 'PipelineElement &' in their ctor. PipelineElements can pass "*this" to their
-// PipelinePads (sources/sinks) in the PipelineElement ctor. This is OK because the ctor of PipelinePad
-// does nothing with the element reference other than setting it as class member.
-class PipelineElement : public PipelineObject
-{
-public:
- PipelineElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~PipelineElement() = default;
-
- PipelineElement(PipelineElement &&other) = delete;
- PipelineElement(const PipelineElement &) = delete;
- PipelineElement &operator=(const PipelineElement &) = delete;
- PipelineElement &operator=(PipelineElement &&other) = delete;
-
- hailo_status activate();
- hailo_status deactivate();
- hailo_status post_deactivate();
- hailo_status clear();
- hailo_status flush();
- hailo_status abort();
- hailo_status resume();
- hailo_status wait_for_finish();
- virtual hailo_status run_push(PipelineBuffer &&buffer) = 0;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) = 0;
- AccumulatorPtr get_fps_accumulator();
- AccumulatorPtr get_latency_accumulator();
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators();
- std::vector<PipelinePad> &sinks();
- std::vector<PipelinePad> &sources();
- const std::vector<PipelinePad> &sinks() const;
- const std::vector<PipelinePad> &sources() const;
- virtual std::string description() const;
-
- virtual void set_on_cant_pull_callback(std::function<void()> callback)
- {
- m_cant_pull_callback = callback;
- }
-
- virtual void set_on_can_pull_callback(std::function<void()> callback)
- {
- m_can_pull_callback = callback;
- }
-
-protected:
- DurationCollector m_duration_collector;
- std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;
- std::vector<PipelinePad> m_sinks;
- std::vector<PipelinePad> m_sources;
-
- std::function<void()> m_cant_pull_callback;
- std::function<void()> m_can_pull_callback;
-
- virtual std::vector<PipelinePad*> execution_pads() = 0;
- virtual hailo_status execute_activate();
- virtual hailo_status execute_deactivate();
- virtual hailo_status execute_post_deactivate();
- virtual hailo_status execute_clear();
- virtual hailo_status execute_flush();
- virtual hailo_status execute_abort();
- virtual hailo_status execute_resume();
- virtual hailo_status execute_wait_for_finish();
-
- virtual hailo_status execute(std::function<hailo_status(PipelinePad*)>);
-};
-
-// An element with one source pad only (generates data)
-class SourceElement : public PipelineElement
-{
-public:
- SourceElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- PipelinePad &source();
-
-protected:
- virtual std::vector<PipelinePad*> execution_pads() override;
-};
-
-// An element with one sink pad only (consumes data)
-class SinkElement : public PipelineElement
-{
-public:
- SinkElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- PipelinePad &sink();
-
-protected:
- virtual std::vector<PipelinePad*> execution_pads() override;
-};
-
-// Transfers data from one pad to another pad. Has one sink pad and one source pad.
-class IntermediateElement : public PipelineElement
-{
-public:
- IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual PipelinePad &next_pad() = 0;
-
-protected:
- virtual std::vector<PipelinePad*> execution_pads() override;
-};
-
-class FilterElement : public IntermediateElement
-{
-public:
- FilterElement(const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~FilterElement() = default;
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
-
-protected:
- // The optional buffer functions as an output buffer that the user can write to instead of acquiring a new buffer
- virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) = 0;
-};
-
-class BaseQueueElement : public IntermediateElement
-{
-public:
- virtual ~BaseQueueElement();
-
- hailo_status set_timeout(std::chrono::milliseconds timeout);
- virtual std::string description() const override;
-
- static constexpr auto INIFINITE_TIMEOUT() { return std::chrono::milliseconds(HAILO_INFINITE); }
-
-protected:
- static Expected<SpscQueue<PipelineBuffer>> create_queue(size_t queue_size, EventPtr shutdown_event);
- BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- Event &&activation_event, Event &&deactivation_event);
-
- hailo_status pipeline_status();
-
- virtual hailo_status execute_activate() override;
- virtual hailo_status execute_post_deactivate() override;
- virtual hailo_status execute_clear() override;
- virtual hailo_status execute_resume() override;
- virtual hailo_status execute_wait_for_finish() override;
-
- /// Starts/stops the queue thread. This functions needs to be called on subclasses ctor and dtor
- /// accordingly because otherwise, if we will start/stop thread in this class we will face pure-call
- /// to `run_in_thread`.
- /// This functions don't return status because they are meant to be called on ctor and dtor
- void start_thread();
- void stop_thread();
-
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
-
- virtual hailo_status run_in_thread() = 0;
-
- SpscQueue<PipelineBuffer> m_queue;
- EventPtr m_shutdown_event;
- std::chrono::milliseconds m_timeout;
- std::thread m_thread;
- std::atomic_bool m_is_thread_running;
- Event m_activation_event;
- Event m_deactivation_event;
- AccumulatorPtr m_queue_size_accumulator;
- std::atomic_bool m_is_run_in_thread_running;
- std::condition_variable m_cv;
- std::mutex m_mutex;
-};
-
-class PushQueueElement : public BaseQueueElement
-{
-public:
- static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
- size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
- virtual ~PushQueueElement();
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- virtual PipelinePad &next_pad() override;
-
-protected:
- virtual hailo_status execute_deactivate() override;
- virtual hailo_status run_in_thread() override;
- virtual hailo_status execute_abort() override;
-};
-
-class PullQueueElement : public BaseQueueElement
-{
-public:
- static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
- size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
- virtual ~PullQueueElement();
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- virtual PipelinePad &next_pad() override;
-
- virtual void set_on_cant_pull_callback(std::function<void()> callback) override
- {
- m_cant_pull_callback = callback;
- m_queue.set_on_cant_enqueue_callback([this] () {
- m_cant_pull_callback();
- });
- }
-
- virtual void set_on_can_pull_callback(std::function<void()> callback) override
- {
- m_can_pull_callback = callback;
- m_queue.set_on_can_enqueue_callback([this] () {
- m_can_pull_callback();
- });
- }
-
-protected:
- virtual hailo_status execute_deactivate() override;
- virtual hailo_status run_in_thread() override;
-};
-
-class UserBufferQueueElement : public PullQueueElement
-{
-public:
- static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
- hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue, EventPtr shutdown_event,
- const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
-
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
-
- virtual void set_on_cant_pull_callback(std::function<void()> callback) override
- {
- m_cant_pull_callback = callback;
- }
-
- virtual void set_on_can_pull_callback(std::function<void()> callback) override
- {
- m_can_pull_callback = callback;
- }
-
-protected:
- virtual hailo_status execute_clear() override;
- virtual hailo_status run_in_thread() override;
-
-private:
- SpscQueue<PipelineBuffer> m_full_buffer_queue;
-};
-
-class BaseMuxElement : public PipelineElement
-{
-public:
- BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~BaseMuxElement() = default;
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
-
-protected:
- virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) = 0;
- virtual std::vector<PipelinePad*> execution_pads() override;
-
- std::chrono::milliseconds m_timeout;
-};
-
-class BaseDemuxElement : public PipelineElement
-{
-public:
- BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~BaseDemuxElement() = default;
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- hailo_status set_timeout(std::chrono::milliseconds timeout);
-
-protected:
- virtual hailo_status execute_activate() override;
- virtual hailo_status execute_deactivate() override;
- virtual hailo_status execute_post_deactivate() override;
- virtual hailo_status execute_abort() override;
- virtual Expected<std::vector<PipelineBuffer>> action(PipelineBuffer &&input) = 0;
- virtual std::vector<PipelinePad*> execution_pads() override;
-
- std::chrono::milliseconds m_timeout;
-
-private:
- bool were_all_sinks_called();
- PipelinePad &next_pad();
-
- std::atomic_bool m_is_activated;
- std::atomic_bool m_was_stream_aborted;
- std::unordered_map<const PipelinePad*, uint32_t> m_index_of_source;
- std::vector<bool> m_was_source_called;
- std::vector<PipelineBuffer> m_buffers_for_action;
- std::mutex m_mutex;
- std::condition_variable m_cv;
-};
-
-enum class AccumulatorType
-{
- FPS,
- LATENCY,
- QUEUE_SIZE
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_PIPELINE_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pipeline_multiplexer.cpp
- * @brief: Pipeline Multiplexer
- **/
-
-#include "pipeline_multiplexer.hpp"
-#include "common/utils.hpp"
-#include "hailo/hailort_common.hpp"
-
-namespace hailort
-{
-
-PipelineMultiplexer::PipelineMultiplexer() :
- m_next_to_write(0),
- m_order_queue(),
- m_currently_writing(INVALID_NETWORK_GROUP_HANDLE),
- m_written_streams_count(0),
- m_read_streams_count(0),
- m_next_to_read_after_drain(INVALID_NETWORK_GROUP_HANDLE)
-{}
-
-bool PipelineMultiplexer::should_use_multiplexer()
-{
- auto disable_multiplexer_env = std::getenv(DISABLE_MULTIPLEXER_ENV_VAR);
- if ((nullptr != disable_multiplexer_env) && (strnlen(disable_multiplexer_env, 2) == 1) && (strncmp(disable_multiplexer_env, "1", 1) == 0)) {
- return false;
- }
- return true;
-}
-
-hailo_status PipelineMultiplexer::add_network_group_instance(multiplexer_ng_handle_t network_group_handle, ConfiguredNetworkGroup &network_group)
-{
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- std::unique_lock<std::mutex> read_lock(m_reading_mutex);
- assert(!contains(m_should_ng_stop, network_group_handle));
-
- m_should_ng_stop[network_group_handle] = false;
-
- m_input_streams_count = static_cast<uint32_t>(network_group.get_input_streams().size());
- m_output_streams_count = static_cast<uint32_t>(network_group.get_output_streams().size());
-
- m_write_barriers[network_group_handle] = make_shared_nothrow<Barrier>(m_input_streams_count);
- CHECK(nullptr != m_write_barriers[network_group_handle], HAILO_OUT_OF_HOST_MEMORY);
- m_is_waiting_to_write[network_group_handle] = false;
-
- for (auto &output_stream : network_group.get_output_streams()) {
- m_is_stream_reading[network_group_handle][output_stream.get().name()] = false;
- }
-
- return HAILO_SUCCESS;
-}
-
-void PipelineMultiplexer::set_output_vstreams_names(multiplexer_ng_handle_t network_group_handle, const std::vector<OutputVStream> &output_vstreams)
-{
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- for (const auto &output_vstream : output_vstreams) {
- m_can_output_vstream_read[network_group_handle][output_vstream.name()] = true;
- }
- m_can_network_group_read[network_group_handle] = true;
-}
-
-bool PipelineMultiplexer::has_more_than_one_ng_instance() const
-{
- return instances_count() > 1;
-}
-
-size_t PipelineMultiplexer::instances_count() const
-{
- return m_should_ng_stop.size();
-}
-
-hailo_status PipelineMultiplexer::wait_for_write(multiplexer_ng_handle_t network_group_handle)
-{
- std::shared_ptr<hailort::Barrier> barrier;
- {
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- assert(contains(m_write_barriers, network_group_handle));
- barrier = m_write_barriers[network_group_handle];
- }
- // TODO: This has no timeout
- // TODO: HRT-8634
- barrier->arrive_and_wait();
- {
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- assert(contains(m_should_ng_stop, network_group_handle));
- assert(contains(m_is_waiting_to_write, network_group_handle));
-
- m_is_waiting_to_write[network_group_handle] = true;
- m_writing_cv.wait(lock, [this, network_group_handle] {
- if (!has_more_than_one_ng_instance() || !should_use_multiplexer()) {
- return true;
- }
-
- if (m_should_ng_stop[network_group_handle]) {
- return true;
- }
-
- if (m_currently_writing == network_group_handle) {
- return true;
- }
-
- if (!can_network_group_read(network_group_handle)) {
- return false;
- }
-
- if (INVALID_NETWORK_GROUP_HANDLE == m_currently_writing) {
- if ((m_next_to_write != network_group_handle) && m_is_waiting_to_write[m_next_to_write] && can_network_group_read(m_next_to_write)) {
- return false;
- }
-
- return true;
- }
-
- return false;
- });
- m_is_waiting_to_write[network_group_handle] = false;
-
- if (m_should_ng_stop[network_group_handle]) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- if (INVALID_NETWORK_GROUP_HANDLE == m_currently_writing) {
- m_currently_writing = network_group_handle;
- m_next_to_write = m_currently_writing;
- }
- }
- m_writing_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-bool PipelineMultiplexer::can_network_group_read(multiplexer_ng_handle_t network_group_handle)
-{
- if (m_should_ng_stop[network_group_handle]) {
- return false;
- }
-
- if (!contains(m_can_network_group_read, network_group_handle)) {
- return true;
- }
-
- return m_can_network_group_read[network_group_handle];
-}
-
-hailo_status PipelineMultiplexer::signal_write_finish(multiplexer_ng_handle_t network_group_handle)
-{
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- m_written_streams_count++;
- if (m_written_streams_count == m_input_streams_count) {
- m_written_streams_count = 0;
- m_currently_writing = INVALID_NETWORK_GROUP_HANDLE;
-
- m_next_to_write++;
- m_next_to_write %= static_cast<uint32_t>(instances_count());
-
- {
- std::unique_lock<std::mutex> reading_lock(m_reading_mutex);
- m_order_queue.push_back(network_group_handle);
- }
- m_reading_cv.notify_all();
-
- lock.unlock();
- m_writing_cv.notify_all();
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<uint32_t> PipelineMultiplexer::wait_for_read(multiplexer_ng_handle_t network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout)
-{
- std::unique_lock<std::mutex> lock(m_reading_mutex);
- uint32_t drain_frames = 0;
-
- assert(contains(m_should_ng_stop, network_group_handle));
- assert(contains(m_is_stream_reading, network_group_handle));
- assert(contains(m_is_stream_reading[network_group_handle], stream_name));
-
- auto wait_res = m_reading_cv.wait_for(lock, timeout, [this, network_group_handle, stream_name, &drain_frames] {
- if (m_should_ng_stop[network_group_handle]) {
- return true;
- }
-
- if (m_is_stream_reading[network_group_handle][stream_name]) {
- return false;
- }
-
- if (m_next_to_read_after_drain == network_group_handle) {
- drain_frames = m_num_frames_to_drain[stream_name];
- return true;
- }
-
- if (m_order_queue.empty()) {
- return false;
- }
-
- if (m_order_queue.front() != network_group_handle) {
- if (!m_should_ng_stop[m_order_queue.front()]) {
- return false;
- }
-
- uint32_t max_drain_count = get_frame_count_to_drain(network_group_handle);
- if (0 == max_drain_count) {
- return false;
- }
-
- drain_frames = drain_aborted_in_order_queue(network_group_handle, stream_name, max_drain_count);
- }
-
- return true;
- });
- CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "{} (D2H) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
-
- if (m_should_ng_stop[network_group_handle]) {
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
-
- m_is_stream_reading[network_group_handle][stream_name] = true;
-
- return drain_frames;
-}
-
-uint32_t PipelineMultiplexer::get_frame_count_to_drain(multiplexer_ng_handle_t network_group_handle)
-{
- uint32_t drain_count = 0;
- for (const auto &handle : m_order_queue) {
- if (!m_should_ng_stop[handle]) {
- if (handle == network_group_handle) {
- // Current instance is in the front after draining
- break;
- } else {
- // Someone else should drain these frames, the current instance won't be in front after draining
- return 0;
- }
- }
-
- drain_count++;
- }
-
- return drain_count;
-}
-
-uint32_t PipelineMultiplexer::drain_aborted_in_order_queue(multiplexer_ng_handle_t network_group_handle, const std::string &stream_name,
- uint32_t max_drain_count)
-{
- // In case of multiple outputs where one or more already read the frame we need to drain one less frame
- for (auto &name_flag_pair : m_is_stream_reading[m_order_queue.front()]) {
- if (name_flag_pair.second) {
- m_num_frames_to_drain[name_flag_pair.first] = max_drain_count - 1;
- } else {
- m_num_frames_to_drain[name_flag_pair.first] = max_drain_count;
- }
- }
-
- m_next_to_read_after_drain = network_group_handle;
- m_read_streams_count = 0;
- for (uint32_t i = 0; i < max_drain_count; i++) {
- for (auto &name_flag_pair : m_is_stream_reading[m_order_queue.front()]) {
- name_flag_pair.second = false;
- }
- m_order_queue.pop_front();
- }
-
- return m_num_frames_to_drain[stream_name];
-}
-
-hailo_status PipelineMultiplexer::signal_read_finish(multiplexer_ng_handle_t network_group_handle)
-{
- std::unique_lock<std::mutex> lock(m_reading_mutex);
- assert(contains(m_is_stream_reading, network_group_handle));
-
- if (m_should_ng_stop[network_group_handle]) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- m_read_streams_count++;
- if (m_read_streams_count == m_output_streams_count) {
- m_read_streams_count = 0;
- m_order_queue.pop_front();
- for (auto &name_flag_pair : m_is_stream_reading[network_group_handle]) {
- name_flag_pair.second = false;
- }
-
- m_next_to_read_after_drain = INVALID_NETWORK_GROUP_HANDLE;
-
- lock.unlock();
- m_reading_cv.notify_all();
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status PipelineMultiplexer::enable_network_group(multiplexer_ng_handle_t network_group_handle)
-{
- {
- std::unique_lock<std::mutex> write_lock(m_writing_mutex);
- std::unique_lock<std::mutex> read_lock(m_reading_mutex);
- assert(contains(m_should_ng_stop, network_group_handle));
- if (!m_should_ng_stop[network_group_handle]) {
- return HAILO_SUCCESS;
- }
-
- m_should_ng_stop[network_group_handle] = false;
- }
-
- m_writing_cv.notify_all();
- m_reading_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status PipelineMultiplexer::disable_network_group(multiplexer_ng_handle_t network_group_handle)
-{
- {
- std::unique_lock<std::mutex> write_lock(m_writing_mutex);
- std::unique_lock<std::mutex> read_lock(m_reading_mutex);
- assert(contains(m_should_ng_stop, network_group_handle));
- if (m_should_ng_stop[network_group_handle]) {
- return HAILO_SUCCESS;
- }
-
- m_should_ng_stop[network_group_handle] = true;
- if (m_currently_writing == network_group_handle) {
- m_currently_writing = INVALID_NETWORK_GROUP_HANDLE;
- }
-
- assert(contains(m_write_barriers, network_group_handle));
- m_write_barriers[network_group_handle]->terminate();
- }
-
- m_writing_cv.notify_all();
- m_reading_cv.notify_all();
-
- return HAILO_SUCCESS;
-}
-
-void PipelineMultiplexer::RunOnceForStream::add_instance()
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- m_was_called[static_cast<uint32_t>(m_was_called.size())] = false;
-}
-
-void PipelineMultiplexer::RunOnceForStream::set_callback(std::function<hailo_status()> callback)
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- m_callback = callback;
-}
-
-hailo_status PipelineMultiplexer::RunOnceForStream::run(multiplexer_ng_handle_t network_group_handle)
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- assert(contains(m_was_called, network_group_handle));
-
- m_was_called[network_group_handle] = true;
- for (auto &handle_flag_pair : m_was_called) {
- if (!handle_flag_pair.second) {
- return HAILO_SUCCESS;
- }
- }
-
- for (auto &handle_flag_pair : m_was_called) {
- handle_flag_pair.second = false;
- }
-
- return m_callback();
-}
-
-hailo_status PipelineMultiplexer::register_run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t handle,
- std::function<hailo_status()> callback)
-{
- std::unique_lock<std::mutex> lock(m_register_run_once_mutex);
- if (!contains(m_run_once_db[stream_name], handle)) {
- m_run_once_db[stream_name][handle] = make_shared_nothrow<RunOnceForStream>();
- CHECK(nullptr != m_run_once_db[stream_name][handle], HAILO_OUT_OF_HOST_MEMORY);
-
- m_run_once_db[stream_name][handle]->set_callback(callback);
- }
-
- m_run_once_db[stream_name][handle]->add_instance();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status PipelineMultiplexer::run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t run_once_handle,
- multiplexer_ng_handle_t network_group_handle)
-{
- return m_run_once_db[stream_name][run_once_handle]->run(network_group_handle);
-}
-
-void PipelineMultiplexer::set_can_output_vstream_read(multiplexer_ng_handle_t network_group_handle, const std::string &vstream_name, bool can_read)
-{
- {
- std::unique_lock<std::mutex> lock(m_writing_mutex);
- assert(contains(m_can_output_vstream_read, network_group_handle));
- assert(contains(m_can_output_vstream_read[network_group_handle], vstream_name));
- assert(contains(m_can_network_group_read, network_group_handle));
-
- m_can_output_vstream_read[network_group_handle][vstream_name] = can_read;
-
- if (can_read != m_can_network_group_read[network_group_handle]) {
- m_can_network_group_read[network_group_handle] = true;
- for (const auto &name_bool_pair : m_can_output_vstream_read[network_group_handle]) {
- if (!name_bool_pair.second) {
- m_can_network_group_read[network_group_handle] = false;
- break;
- }
- }
- }
- }
- m_writing_cv.notify_all();
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file pipeline_multiplexer.hpp
- * @brief The pipeline multiplexer is a synchronization mechanism that allows communication
- * between different pipelines that use the same low-level streams.
- **/
-
-#ifndef _HAILO_PIPELINE_MULTIPLEXER_HPP_
-#define _HAILO_PIPELINE_MULTIPLEXER_HPP_
-
-#include "hailo/event.hpp"
-#include "hailo/network_group.hpp"
-#include "network_group_scheduler.hpp"
-#include "common/barrier.hpp"
-
-#include <mutex>
-#include <queue>
-
-namespace hailort
-{
-
-#define DISABLE_MULTIPLEXER_ENV_VAR "HAILO_DISABLE_MULTIPLEXER"
-
-using multiplexer_ng_handle_t = uint32_t;
-using run_once_for_stream_handle_t = uint32_t;
-
-class PipelineMultiplexer
-{
-public:
- PipelineMultiplexer();
-
- virtual ~PipelineMultiplexer() = default;
- PipelineMultiplexer(const PipelineMultiplexer &other) = delete;
- PipelineMultiplexer &operator=(const PipelineMultiplexer &other) = delete;
- PipelineMultiplexer &operator=(PipelineMultiplexer &&other) = delete;
- PipelineMultiplexer(PipelineMultiplexer &&other) = delete;
-
- hailo_status add_network_group_instance(multiplexer_ng_handle_t network_group_handle, ConfiguredNetworkGroup &network_group);
- void set_output_vstreams_names(multiplexer_ng_handle_t network_group_handle, const std::vector<OutputVStream> &output_vstreams);
- bool has_more_than_one_ng_instance() const;
- size_t instances_count() const;
- hailo_status wait_for_write(multiplexer_ng_handle_t network_group_handle);
- hailo_status signal_write_finish(multiplexer_ng_handle_t network_group_handle);
- Expected<uint32_t> wait_for_read(multiplexer_ng_handle_t network_group_handle, const std::string &stream_name,
- const std::chrono::milliseconds &timeout);
- hailo_status signal_read_finish(multiplexer_ng_handle_t network_group_handle);
- hailo_status enable_network_group(multiplexer_ng_handle_t network_group_handle);
- hailo_status disable_network_group(multiplexer_ng_handle_t network_group_handle);
-
- hailo_status register_run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t handle, std::function<hailo_status()> callback);
- hailo_status run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t run_once_handle,
- multiplexer_ng_handle_t network_group_handle);
-
- void set_can_output_vstream_read(multiplexer_ng_handle_t network_group_handle, const std::string &vstream_name, bool can_read);
-
- static bool should_use_multiplexer();
-
-private:
- std::unordered_map<multiplexer_ng_handle_t, std::atomic_bool> m_should_ng_stop;
- std::unordered_map<multiplexer_ng_handle_t, std::atomic_bool> m_is_waiting_to_write;
-
- uint32_t m_input_streams_count;
- uint32_t m_output_streams_count;
-
- multiplexer_ng_handle_t m_next_to_write;
- std::unordered_map<multiplexer_ng_handle_t, std::shared_ptr<Barrier>> m_write_barriers;
- std::deque<multiplexer_ng_handle_t> m_order_queue;
- std::mutex m_writing_mutex;
- std::condition_variable m_writing_cv;
- multiplexer_ng_handle_t m_currently_writing;
- std::atomic_uint32_t m_written_streams_count;
-
- std::unordered_map<multiplexer_ng_handle_t, std::unordered_map<std::string, std::atomic_bool>> m_is_stream_reading;
- std::mutex m_reading_mutex;
- std::condition_variable m_reading_cv;
- std::atomic_uint32_t m_read_streams_count;
- std::unordered_map<std::string, std::atomic_uint32_t> m_num_frames_to_drain;
- multiplexer_ng_handle_t m_next_to_read_after_drain;
-
- std::unordered_map<multiplexer_ng_handle_t, std::unordered_map<std::string, std::atomic_bool>> m_can_output_vstream_read;
- std::unordered_map<multiplexer_ng_handle_t, std::atomic_bool> m_can_network_group_read;
-
- bool can_network_group_read(multiplexer_ng_handle_t network_group_handle);
- uint32_t get_frame_count_to_drain(multiplexer_ng_handle_t network_group_handle);
- uint32_t drain_aborted_in_order_queue(multiplexer_ng_handle_t network_group_handle, const std::string &stream_name, uint32_t max_drain_count);
-
- class RunOnceForStream final
- {
- public:
- RunOnceForStream() {};
-
- private:
- void add_instance();
- void set_callback(std::function<hailo_status()> callback);
- hailo_status run(multiplexer_ng_handle_t network_group_handle);
-
- std::unordered_map<multiplexer_ng_handle_t, std::atomic_bool> m_was_called;
- std::function<hailo_status()> m_callback;
- std::mutex m_mutex;
-
- friend class PipelineMultiplexer;
- };
-
- // The run once map stores for each stream (by name), a map of RunOnceForStream which the user can register to.
- // run_once_for_stream_handle_t is the handle which the user can access to his specific callback (for example, abort stream function).
- // This is used for flushing, aborting and clear aborting streams.
- std::unordered_map<std::string, std::unordered_map<run_once_for_stream_handle_t, std::shared_ptr<RunOnceForStream>>> m_run_once_db;
- std::mutex m_register_run_once_mutex;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_PIPELINE_MULTIPLEXER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hailort_common.hpp
- * @brief Utility functions for rpc client communication
- **/
-
-#ifndef _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_
-#define _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailort_defaults.hpp"
-#include "common/async_thread.hpp"
-#include "hailort_rpc_client.hpp"
-#include "rpc/rpc_definitions.hpp"
-#include <chrono>
-
-namespace hailort
-{
-
-class HailoRtRpcClientUtils final
-{
-public:
- static HailoRtRpcClientUtils& get_instance()
- {
- static HailoRtRpcClientUtils instance;
- return instance;
- }
-
- hailo_status init_client_service_communication()
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- if (!m_initialized) {
-
- auto channel = grpc::CreateChannel(hailort::HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials());
- auto client = make_shared_nothrow<HailoRtRpcClient>(channel);
- CHECK(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
- m_initialized = true;
- auto reply = client->get_service_version();
- CHECK_EXPECTED_AS_STATUS(reply);
- hailo_version_t client_version = {};
- auto status = hailo_get_library_version(&client_version);
- CHECK_SUCCESS(status);
- auto service_version = reply.value();
- auto are_equal = [](auto version1, auto version2) {
- return version1.major == version2.major
- && version1.minor == version2.minor
- && version1.revision == version2.revision;
- };
- CHECK(are_equal(service_version, client_version), HAILO_INVALID_SERVICE_VERSION, "Invalid libhailort version on service: "
- "client version {}.{}.{}, service version {}.{}.{}",
- service_version.major, service_version.minor, service_version.revision,
- client_version.major, client_version.minor, client_version.revision);
-
- m_keep_alive_thread = make_unique_nothrow<AsyncThread<hailo_status>>([client] () {
- auto pid = getpid();
- auto status = client->client_keep_alive(pid);
- CHECK_SUCCESS(status);
- return HAILO_SUCCESS;
- });
-
- }
- return HAILO_SUCCESS;
- }
-
-private:
- ~HailoRtRpcClientUtils()
- {
- m_keep_alive_thread.release();
- }
-
- std::mutex m_mutex;
- AsyncThreadPtr<hailo_status> m_keep_alive_thread;
- bool m_initialized = false;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_scheduler.cpp
- * @brief: Network scheduler
- **/
-
-#include "scheduled_network_group.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "hef_internal.hpp"
-#include "vdevice_stream_multiplexer_wrapper.hpp"
-#include "scheduler_oracle.hpp"
-
-#include <fstream>
-
-namespace hailort
-{
-
-ScheduledNetworkGroup::ScheduledNetworkGroup(std::shared_ptr<ConfiguredNetworkGroup> cng, std::chrono::milliseconds timeout,
- uint16_t max_batch_size, StreamInfoVector &stream_infos, std::string network_group_name) :
- m_cng(cng),
- m_last_run_time_stamp(std::chrono::steady_clock::now()),
- m_timeout(std::move(timeout)),
- m_frame_was_sent(false),
- m_max_batch_size(max_batch_size),
- m_network_group_name(network_group_name),
- m_inputs_names(),
- m_outputs_names(),
- m_is_nms(false)
-{
- // Prepare empty counters for the added cng
- for (const auto &stream_info : stream_infos) {
- m_min_threshold_per_stream[stream_info.name] = DEFAULT_SCHEDULER_MIN_THRESHOLD;
- if (HAILO_H2D_STREAM == stream_info.direction) {
- m_requested_write_frames.insert(stream_info.name);
- m_finished_write_frames.insert(stream_info.name);
- m_h2d_requested_transferred_frames.insert(stream_info.name);
- m_h2d_finished_transferred_frames.insert(stream_info.name);
- m_inputs_names.push_back(stream_info.name);
- } else {
- m_requested_read_frames.insert(stream_info.name);
- m_ongoing_read_frames.insert(stream_info.name);
- m_finished_read_frames.insert(stream_info.name);
- m_d2h_finished_transferred_frames.insert(stream_info.name);
- m_outputs_names.push_back(stream_info.name);
- m_output_streams_read_orders[stream_info.name] = std::queue<uint32_t>();
- if (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) {
- m_is_nms = true;
- }
- }
- }
-}
-
-Expected<std::shared_ptr<ScheduledNetworkGroup>> ScheduledNetworkGroup::create(std::shared_ptr<ConfiguredNetworkGroup> added_cng, StreamInfoVector &stream_infos)
-{
- auto timeout = DEFAULT_SCHEDULER_TIMEOUT;
-
- uint16_t max_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE;
- auto cng_base = std::dynamic_pointer_cast<ConfiguredNetworkGroupBase>(added_cng);
- assert(nullptr != cng_base);
- if (cng_base->get_supported_features().multi_context) {
- auto batch_size = cng_base->get_stream_batch_size(stream_infos[0].name);
- CHECK_EXPECTED(batch_size);
-
- if (batch_size.value() > SINGLE_CONTEXT_BATCH_SIZE) {
- max_batch_size = batch_size.release();
- }
- }
-
- return make_shared_nothrow<ScheduledNetworkGroup>(added_cng, timeout, max_batch_size, stream_infos, added_cng->name());
-}
-
-bool ScheduledNetworkGroup::has_enough_space_in_read_buffers(uint32_t ongoing_frames)
-{
- auto output_streams = m_cng->get_output_streams();
- for (auto &output_stream : output_streams) {
- OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream.get());
- if (auto pending_frames_size = vdevice_output.get_buffer_frames_size()) {
- if (pending_frames_size.value() <= ongoing_frames) {
- return false;
- }
- // If couldnt get pending frames size and count (e.g. NMS layer), assume we have space - scheduler switch will prevent deadlocks here
- }
- }
- return true;
-}
-
-bool ScheduledNetworkGroup::has_input_written_most_frames(const std::string &stream_name)
-{
- auto total_writes = total_written_frames_count();
- return total_writes[stream_name] == get_max_value_of_unordered_map(total_writes);
-}
-
-// TODO: Use get_pre_transfer_h2d_frames_count + get_h2d_transferred_frames_count
-// TODO: Avoid returning map (malloc)
-std::unordered_map<stream_name_t, uint32_t> ScheduledNetworkGroup::total_written_frames_count()
-{
- std::unordered_map<stream_name_t, uint32_t> write_sum;
- for (const auto &name : get_inputs_names()) {
- write_sum[name] = m_requested_write_frames[name] + m_finished_write_frames[name]
- + m_h2d_requested_transferred_frames[name]
- + m_h2d_finished_transferred_frames[name];
- }
- return write_sum;
-}
-
-// TODO: Use max(m_d2h_finished_transferred_frames) == 0 instead
-bool ScheduledNetworkGroup::has_pending_frames()
-{
- uint32_t h2d_transferred_frames_count = get_h2d_transferred_frames_count();
- for (const auto &name : get_outputs_names()) {
- if (m_finished_read_frames[name] < h2d_transferred_frames_count) {
- return true;
- }
- }
- return false;
-}
-
-uint32_t ScheduledNetworkGroup::get_h2d_transferred_frames_count()
-{
- std::unordered_map<stream_name_t, uint32_t> transferred_frames;
- for (const auto &name : get_inputs_names()) {
- transferred_frames[name] = m_h2d_requested_transferred_frames[name] + m_h2d_finished_transferred_frames[name];
- }
- return get_max_value_of_unordered_map(transferred_frames);
-}
-
-bool ScheduledNetworkGroup::can_stream_read(const std::string &stream_name)
-{
- return !m_output_streams_read_orders[stream_name].empty();
-}
-
-bool ScheduledNetworkGroup::use_dynamic_batch_flow()
-{
- return (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE != m_max_batch_size);
-}
-
-bool ScheduledNetworkGroup::has_ng_drained_everything(bool streaming_mode)
-{
- // On streaming mode we want to check those conditions even on NMS
- if (!is_nms() || streaming_mode) {
- if (!m_requested_write_frames.empty()) {
- return false;
- }
- if (!m_finished_write_frames.empty()) {
- return false;
- }
- if (!m_h2d_requested_transferred_frames.empty()) {
- return false;
- }
- }
-
- uint32_t written_frames = m_h2d_finished_transferred_frames.get_max_value();
- for (const auto &name : get_outputs_names()) {
- if ((m_finished_read_frames[name] + m_d2h_finished_transferred_frames[name]) < written_frames) {
- return false;
- }
- }
- return true;
-}
-
-void ScheduledNetworkGroup::decrease_current_ng_counters()
-{
- // Decrease only if counter is 2 or bigger because reaching 0 can cause states to change
- if (!m_h2d_finished_transferred_frames.all_values_bigger_or_equal(2)) {
- return;
- }
- if (!m_finished_read_frames.all_values_bigger_or_equal(2)) {
- return;
- }
-
- for (const auto &name : get_inputs_names()) {
- m_h2d_finished_transferred_frames[name]--;
- }
- for (const auto &name : get_outputs_names()) {
- m_finished_read_frames[name]--;
- }
-}
-
-uint32_t ScheduledNetworkGroup::get_pre_transfer_h2d_frames_count()
-{
- std::unordered_map<stream_name_t, uint32_t> write_sum;
- for (const auto &name : get_inputs_names()) {
- write_sum[name] = m_requested_write_frames[name] + m_finished_write_frames[name];
- }
- return get_max_value_of_unordered_map(write_sum);
-}
-
-hailo_status ScheduledNetworkGroup::set_timeout(const std::chrono::milliseconds &timeout, const stream_name_t &stream_name)
-{
- CHECK(!m_frame_was_sent, HAILO_INVALID_OPERATION,
- "Setting scheduler timeout is allowed only before sending / receiving frames on the network group.");
- m_timeout = timeout;
-
- auto name = (stream_name.empty()) ? get_network_group_name() : stream_name;
- LOGGER__INFO("Setting scheduler timeout of {} to {}ms", name, timeout.count());
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ScheduledNetworkGroup::set_threshold(uint32_t threshold, const stream_name_t &stream_name)
-{
- CHECK((CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) ||
- (threshold <= m_max_batch_size), HAILO_INVALID_ARGUMENT, "Threshold must be equal or lower than the maximum batch size!");
-
- CHECK(!m_frame_was_sent, HAILO_INVALID_OPERATION,
- "Setting scheduler threshold is allowed only before sending / receiving frames on the network group.");
-
- // TODO: Support setting threshold per stream. currently stream_name is always empty and de-facto we set threshold for the whole NG
- for (auto &threshold_per_stream_pair : m_min_threshold_per_stream) {
- threshold_per_stream_pair.second = threshold;
- }
-
- auto name = (stream_name.empty()) ? get_network_group_name() : stream_name;
- LOGGER__INFO("Setting scheduler threshold of {} to {} frames", name, threshold);
-
- return HAILO_SUCCESS;
-}
-
-std::string ScheduledNetworkGroup::get_network_group_name()
-{
- return m_network_group_name;
-}
-
-
-std::shared_ptr<ConfiguredNetworkGroup> ScheduledNetworkGroup::get_network_group()
-{
- return m_cng;
-}
-
-void ScheduledNetworkGroup::mark_frame_sent()
-{
- m_frame_was_sent = true;
-}
-
-std::chrono::time_point<std::chrono::steady_clock> ScheduledNetworkGroup::get_last_run_timestamp()
-{
- return m_last_run_time_stamp;
-}
-
-void ScheduledNetworkGroup::set_last_run_timestamp(const std::chrono::time_point<std::chrono::steady_clock> ×tamp)
-{
- m_last_run_time_stamp = timestamp;
-}
-
-Expected<std::chrono::milliseconds> ScheduledNetworkGroup::get_timeout(const stream_name_t &stream_name)
-{
- CHECK_AS_EXPECTED(stream_name.empty(), HAILO_INVALID_OPERATION, "timeout per network is not supported");
- auto timeout = m_timeout;
- return timeout;
-}
-
-Expected<uint32_t> ScheduledNetworkGroup::get_threshold(const stream_name_t &stream_name)
-{
- CHECK_AS_EXPECTED(contains(m_min_threshold_per_stream, stream_name), HAILO_NOT_FOUND);
- return m_min_threshold_per_stream[stream_name].load();
-}
-
-uint16_t ScheduledNetworkGroup::get_max_batch_size()
-{
- if (!use_dynamic_batch_flow()) {
- return SINGLE_CONTEXT_BATCH_SIZE;
- }
- return m_max_batch_size;
-}
-
-Counter &ScheduledNetworkGroup::requested_write_frames()
-{
- return m_requested_write_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::requested_write_frames(const stream_name_t &stream_name)
-{
- return m_requested_write_frames[stream_name];
-}
-
-uint32_t ScheduledNetworkGroup::requested_write_frames_max_value()
-{
- return m_requested_write_frames.get_max_value();
-}
-
-Counter &ScheduledNetworkGroup::finished_write_frames()
-{
- return m_finished_write_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::finished_write_frames(const stream_name_t &stream_name)
-{
- return m_finished_write_frames[stream_name];
-}
-
-uint32_t ScheduledNetworkGroup::finished_write_frames_min_value()
-{
- return m_finished_write_frames.get_min_value();
-}
-
-Counter &ScheduledNetworkGroup::h2d_requested_transferred_frames()
-{
- return m_h2d_requested_transferred_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::h2d_requested_transferred_frames(const stream_name_t &stream_name)
-{
- return m_h2d_requested_transferred_frames[stream_name];
-}
-
-Counter &ScheduledNetworkGroup::h2d_finished_transferred_frames()
-{
- return m_h2d_finished_transferred_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::h2d_finished_transferred_frames(const stream_name_t &stream_name)
-{
- return m_h2d_finished_transferred_frames[stream_name];
-}
-
-Counter &ScheduledNetworkGroup::requested_read_frames()
-{
- return m_requested_read_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::requested_read_frames(const stream_name_t &stream_name)
-{
- return m_requested_read_frames[stream_name];
-}
-
-Counter &ScheduledNetworkGroup::ongoing_read_frames()
-{
- return m_ongoing_read_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::ongoing_read_frames(const stream_name_t &stream_name)
-{
- return m_ongoing_read_frames[stream_name];
-}
-
-Counter &ScheduledNetworkGroup::d2h_finished_transferred_frames()
-{
- return m_d2h_finished_transferred_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::d2h_finished_transferred_frames(const stream_name_t &stream_name)
-{
- return m_d2h_finished_transferred_frames[stream_name];
-}
-
-Counter &ScheduledNetworkGroup::finished_read_frames()
-{
- return m_finished_read_frames;
-}
-
-std::atomic_uint32_t &ScheduledNetworkGroup::finished_read_frames(const stream_name_t &stream_name)
-{
- return m_finished_read_frames[stream_name];
-}
-
-uint32_t ScheduledNetworkGroup::finished_read_frames_min_value()
-{
- return m_finished_read_frames.get_min_value();
-}
-
-const std::vector<stream_name_t> &ScheduledNetworkGroup::get_inputs_names()
-{
- return m_inputs_names;
-}
-
-const std::vector<stream_name_t> &ScheduledNetworkGroup::get_outputs_names()
-{
- return m_outputs_names;
-}
-
-void ScheduledNetworkGroup::push_device_index(uint32_t device_index)
-{
- for (auto& stream_name : get_outputs_names()) {
- m_output_streams_read_orders[stream_name].push(device_index);
- }
-}
-
-uint32_t ScheduledNetworkGroup::pop_device_index(const stream_name_t &stream_name)
-{
- assert(contains(m_output_streams_read_orders, stream_name));
- assert(!m_output_streams_read_orders[stream_name].empty());
- auto device_index = m_output_streams_read_orders[stream_name].front();
- m_output_streams_read_orders[stream_name].pop();
-
- return device_index;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file network_group_scheduler.hpp
- * @brief Class declaration for NetworkGroupScheduler that schedules network groups to be active depending on the scheduling algorithm.
- **/
-
-#ifndef _HAILO_SCHEDULED_NETWORK_GROUP_HPP_
-#define _HAILO_SCHEDULED_NETWORK_GROUP_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/network_group.hpp"
-#include "common/utils.hpp"
-#include "common/filesystem.hpp"
-#include "scheduler_mon.hpp"
-
-#include <condition_variable>
-#include <queue>
-
-
-namespace hailort
-{
-
-#define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
-#define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
-
-using stream_name_t = std::string;
-
-#define SINGLE_CONTEXT_BATCH_SIZE (1)
-
-class Counter
-{
-public:
- Counter() : m_map()
- {};
-
- void insert(const stream_name_t &name)
- {
- assert(!contains(m_map, name));
- m_map[name] = 0;
- }
-
- std::atomic_uint32_t &operator [](const stream_name_t &name)
- {
- assert(contains(m_map, name));
- return m_map[name];
- }
-
- void increase(const stream_name_t &name)
- {
- assert(contains(m_map, name));
- m_map[name]++;
- }
-
- void decrease(const stream_name_t &name)
- {
- assert(contains(m_map, name));
- if (0 != m_map[name]) {
- m_map[name]--;
- }
- }
-
- uint32_t get_min_value()
- {
- return get_min_value_of_unordered_map(m_map);
- }
-
- uint32_t get_max_value()
- {
- return get_max_value_of_unordered_map(m_map);
- }
-
- bool all_values_bigger_or_equal(uint32_t value)
- {
- for (const auto &pair : m_map) {
- if (value > pair.second) {
- return false;
- }
- }
- return true;
- }
-
- bool empty()
- {
- for (const auto &pair : m_map) {
- if (0 != pair.second) {
- return false;
- }
- }
- return true;
- }
-
-private:
- std::unordered_map<stream_name_t, std::atomic_uint32_t> m_map;
-};
-
-class ScheduledNetworkGroup
-{
-public:
- static Expected<std::shared_ptr<ScheduledNetworkGroup>> create(std::shared_ptr<ConfiguredNetworkGroup> added_cng, StreamInfoVector &stream_infos);
-
- virtual ~ScheduledNetworkGroup() = default;
- ScheduledNetworkGroup(const ScheduledNetworkGroup &other) = delete;
- ScheduledNetworkGroup &operator=(const ScheduledNetworkGroup &other) = delete;
- ScheduledNetworkGroup &operator=(ScheduledNetworkGroup &&other) = delete;
- ScheduledNetworkGroup(ScheduledNetworkGroup &&other) noexcept = delete;
-
- bool has_enough_space_in_read_buffers(uint32_t ongoing_frames);
- bool has_input_written_most_frames(const std::string &stream_name);
- std::unordered_map<stream_name_t, uint32_t> total_written_frames_count();
- bool has_pending_frames();
- bool can_stream_read(const std::string &stream_name);
- bool use_dynamic_batch_flow();
- bool has_ng_drained_everything(bool streaming_mode);
- void decrease_current_ng_counters();
- uint32_t get_pre_transfer_h2d_frames_count();
-
- std::string get_network_group_name();
- uint32_t get_h2d_transferred_frames_count();
-
- std::shared_ptr<ConfiguredNetworkGroup> get_network_group();
-
- void mark_frame_sent();
-
- std::chrono::time_point<std::chrono::steady_clock> get_last_run_timestamp();
- void set_last_run_timestamp(const std::chrono::time_point<std::chrono::steady_clock> ×tamp);
-
- Expected<std::chrono::milliseconds> get_timeout(const stream_name_t &stream_name = "");
- hailo_status set_timeout(const std::chrono::milliseconds &timeout, const stream_name_t &stream_name = "");
- Expected<uint32_t> get_threshold(const stream_name_t &stream_name);
- hailo_status set_threshold(uint32_t threshold, const stream_name_t &stream_name = "");
-
- uint16_t get_max_batch_size();
-
- Counter &requested_write_frames();
- std::atomic_uint32_t &requested_write_frames(const stream_name_t &stream_name);
- uint32_t requested_write_frames_max_value();
- Counter &finished_write_frames();
- std::atomic_uint32_t &finished_write_frames(const stream_name_t &stream_name);
- uint32_t finished_write_frames_min_value();
-
- Counter &h2d_requested_transferred_frames();
- std::atomic_uint32_t &h2d_requested_transferred_frames(const stream_name_t &stream_name);
- Counter &h2d_finished_transferred_frames();
- std::atomic_uint32_t &h2d_finished_transferred_frames(const stream_name_t &stream_name);
-
- Counter &requested_read_frames();
- std::atomic_uint32_t &requested_read_frames(const stream_name_t &stream_name);
- Counter &ongoing_read_frames();
- std::atomic_uint32_t &ongoing_read_frames(const stream_name_t &stream_name);
-
- Counter &d2h_finished_transferred_frames();
- std::atomic_uint32_t &d2h_finished_transferred_frames(const stream_name_t &stream_name);
- Counter &finished_read_frames();
- std::atomic_uint32_t &finished_read_frames(const stream_name_t &stream_name);
- uint32_t finished_read_frames_min_value();
-
- const std::vector<stream_name_t> &get_outputs_names();
- const std::vector<stream_name_t> &get_inputs_names();
-
- bool is_nms()
- {
- return m_is_nms;
- }
-
- void push_device_index(uint32_t device_index);
- uint32_t pop_device_index(const stream_name_t &stream_name);
-
- ScheduledNetworkGroup(std::shared_ptr<ConfiguredNetworkGroup> cng, std::chrono::milliseconds timeout,
- uint16_t max_batch_size, StreamInfoVector &stream_infos, std::string network_group_name);
-
-private:
- std::shared_ptr<ConfiguredNetworkGroup> m_cng;
-
- std::chrono::time_point<std::chrono::steady_clock> m_last_run_time_stamp;
- std::chrono::milliseconds m_timeout;
-
- std::atomic_bool m_frame_was_sent;
- uint16_t m_max_batch_size;
-
- Counter m_requested_write_frames; // 'wait_for_write()' has been called
- Counter m_finished_write_frames; // 'signal_finished_write()' has been called - frame is written in buffer (writes are a-sync)
-
- Counter m_h2d_requested_transferred_frames; // 'send_pending_buffer()' has been called
- Counter m_h2d_finished_transferred_frames; // Frame has been transferred to device (intrpt was raised)
-
- Counter m_requested_read_frames; // 'wait_for_read()' has been called
- Counter m_ongoing_read_frames; // 'wait_for_read()' has finished, the user is blocking on read (reads are sync)
-
- Counter m_d2h_finished_transferred_frames; // Frame has been transferred from device (intrpt was raised)
- Counter m_finished_read_frames; // 'signal_finish_read()' has been called - user finished getting the frame
-
- std::unordered_map<stream_name_t, std::atomic_uint32_t> m_min_threshold_per_stream;
-
- std::string m_network_group_name;
-
- std::vector<stream_name_t> m_inputs_names;
- std::vector<stream_name_t> m_outputs_names;
-
- std::unordered_map<stream_name_t, std::queue<uint32_t>> m_output_streams_read_orders;
-
- bool m_is_nms;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_SCHEDULED_NETWORK_GROUP_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file scheduled_stream.hpp
- * @brief Internal stream implementation for scheduled streams
- *
- **/
-
-#ifndef HAILO_SCHEDULED_STREAM_HPP_
-#define HAILO_SCHEDULED_STREAM_HPP_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "vdevice_internal.hpp"
-#include "vdma_device.hpp"
-#include "vdevice_stream.hpp"
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-
-class ScheduledInputStream : public InputVDeviceBaseStream {
-public:
- ScheduledInputStream(ScheduledInputStream &&other) :
- InputVDeviceBaseStream(std::move(other)),
- m_network_group_handle(std::move(other.m_network_group_handle)),
- m_network_group_scheduler(std::move(other.m_network_group_scheduler))
- {}
-
- explicit ScheduledInputStream(
- std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
- const scheduler_ng_handle_t &network_group_handle,
- EventPtr &&network_group_activated_event,
- const LayerInfo &layer_info,
- NetworkGroupSchedulerWeakPtr network_group_scheduler,
- hailo_status &status) :
- InputVDeviceBaseStream(std::move(streams), std::move(network_group_activated_event), layer_info, status),
- m_network_group_handle(network_group_handle),
- m_network_group_scheduler(network_group_scheduler)
- {}
-
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override { return true; };
-
- virtual void notify_all() override
- {
- auto scheduler = m_network_group_scheduler.lock();
- if (nullptr == scheduler) {
- LOGGER__CRITICAL("Failed to acquire scheduler");
- return;
- }
- scheduler->notify_all();
-
- for (auto &stream : m_streams) {
- stream.get().notify_all();
- }
- }
-
-protected:
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
- const std::function<bool()> &should_cancel = []() { return false; });
-
- Expected<size_t> sync_write_raw_buffer_impl(const MemoryView &buffer, scheduler_ng_handle_t network_group_handle,
- const std::function<bool()> &should_cancel);
-
- scheduler_ng_handle_t m_network_group_handle;
- NetworkGroupSchedulerWeakPtr m_network_group_scheduler;
-
-private:
- hailo_status abort_impl(scheduler_ng_handle_t network_group_handle);
- hailo_status clear_abort_impl(scheduler_ng_handle_t network_group_handle);
-};
-
-class ScheduledOutputStream : public OutputVDeviceBaseStream {
-public:
- ScheduledOutputStream(ScheduledOutputStream &&other) :
- OutputVDeviceBaseStream(std::move(other)),
- m_network_group_handle(std::move(other.m_network_group_handle)),
- m_network_group_scheduler(std::move(other.m_network_group_scheduler))
- {}
-
- explicit ScheduledOutputStream(
- std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
- const scheduler_ng_handle_t &network_group_handle,
- const LayerInfo &layer_info,
- EventPtr &&network_group_activated_event,
- NetworkGroupSchedulerWeakPtr network_group_scheduler,
- hailo_status &status) :
- OutputVDeviceBaseStream(std::move(streams), layer_info, std::move(network_group_activated_event), status),
- m_network_group_handle(network_group_handle),
- m_network_group_scheduler(network_group_scheduler)
- {}
-
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override { return true; };
-
-protected:
- virtual hailo_status read(MemoryView buffer) override;
- hailo_status read_impl(MemoryView buffer, scheduler_ng_handle_t network_group_handle);
-
- scheduler_ng_handle_t m_network_group_handle;
- NetworkGroupSchedulerWeakPtr m_network_group_scheduler;
-
-private:
- hailo_status abort_impl(scheduler_ng_handle_t network_group_handle);
- hailo_status clear_abort_impl(scheduler_ng_handle_t network_group_handle);
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_SCHEDULED_STREAM_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file scheduler_mon.hpp
- * @brief Defines for scheduler monitor of networks.
- **/
-
-#ifndef _HAILO_SCHEDULER_MON_HPP_
-#define _HAILO_SCHEDULER_MON_HPP_
-
-#include "hailo/hailort.hpp"
-#include "common/filesystem.hpp"
-
-#if defined(_MSC_VER)
-#pragma warning(push)
-#pragma warning(disable: 4244 4267 4127)
-#else
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wconversion"
-#endif
-#include "scheduler_mon.pb.h"
-#if defined(_MSC_VER)
-#pragma warning( pop )
-#else
-#pragma GCC diagnostic pop
-#endif
-
-#include <iostream>
-#include <string>
-
-namespace hailort
-{
-
-#define SCHEDULER_MON_TMP_DIR ("/tmp/hmon_files/")
-#define SCHEDULER_MON_ENV_VAR ("SCHEDULER_MONITOR")
-#define DEFAULT_SCHEDULER_MON_INTERVAL (std::chrono::seconds(1))
-#define SCHEDULER_MON_NAN_VAL (-1)
-
-class SchedulerMon
-{
-public:
-
- static bool should_monitor()
- {
- #if defined(__GNUC__)
- auto mon_var = std::getenv(SCHEDULER_MON_ENV_VAR);
- return (mon_var != nullptr) && strncmp(mon_var, "1", 1) == 0;
- #else
- // TODO: HRT-7304 - Add support for windows
- return false;
- #endif
- }
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_SCHEDULER_MON_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file scheduler_oracle.cpp
- * @brief:
- **/
-
-#include "scheduler_oracle.hpp"
-#include "tracer_macros.hpp"
-
-namespace hailort
-{
-
-bool NetworkGroupSchedulerOracle::choose_next_model(NetworkGroupScheduler &scheduler, uint32_t device_id)
-{
- auto cngs_size = scheduler.m_cngs.size();
- auto& device_info = scheduler.m_devices[device_id];
- for (uint32_t i = 0; i < cngs_size; i++) {
- uint32_t index = scheduler.m_last_choosen_network_group + i + 1;
- index %= static_cast<uint32_t>(cngs_size);
- auto ready_info = scheduler.is_network_group_ready(index, true, device_id);
- if (ready_info.is_ready) {
- TRACE(ChooseNetworkGroupTrace, "", index, ready_info.threshold, ready_info.timeout);
- device_info->is_switching_network_group = true;
- device_info->next_network_group_handle = index;
- scheduler.m_last_choosen_network_group = index;
- return true;
- }
- }
- return false;
-}
-
-// TODO: return device handle instead index
-uint32_t NetworkGroupSchedulerOracle::get_avail_device(NetworkGroupScheduler &scheduler, scheduler_ng_handle_t network_group_handle)
-{
- const bool check_threshold = false;
-
- // Check if should be next
- /* Checking (INVALID_NETWORK_GROUP_HANDLE == m_current_network_group) for activating the first time the scheduler is running.
- In this case we don't want to check threshold. */
- for (auto active_device_info : scheduler.m_devices) {
- if (active_device_info->is_switching_network_group && scheduler.has_ng_drained_everything(active_device_info->current_network_group_handle, active_device_info->device_id) &&
- (((INVALID_NETWORK_GROUP_HANDLE == active_device_info->current_network_group_handle) &&
- scheduler.is_network_group_ready(network_group_handle, check_threshold, active_device_info->device_id).is_ready) ||
- (active_device_info->next_network_group_handle == network_group_handle))) {
- return active_device_info->device_id;
- }
- }
-
- // Check if device Idle with this network active
- for (auto active_device_info : scheduler.m_devices) {
- if ((active_device_info->current_network_group_handle == network_group_handle) && !active_device_info->is_switching_network_group &&
- scheduler.has_ng_drained_everything(active_device_info->current_network_group_handle, active_device_info->device_id) &&
- scheduler.is_network_group_ready(network_group_handle, check_threshold, active_device_info->device_id).is_ready) {
- scheduler.m_last_choosen_network_group = network_group_handle;
- return active_device_info->device_id;
- }
- }
-
- // Check if device Idle
- for (auto active_device_info : scheduler.m_devices) {
- if (!active_device_info->is_switching_network_group && scheduler.has_ng_drained_everything(active_device_info->current_network_group_handle, active_device_info->device_id) &&
- scheduler.is_network_group_ready(network_group_handle, check_threshold, active_device_info->device_id).is_ready) {
- scheduler.m_last_choosen_network_group = network_group_handle;
- return active_device_info->device_id;
- }
- }
-
- return INVALID_DEVICE_ID;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file scheduler_oracle.hpp
- * @brief
- **/
-
-#ifndef _HAILO_SCHEDULER_ORACLE_HPP_
-#define _HAILO_SCHEDULER_ORACLE_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/network_group.hpp"
-#include "common/utils.hpp"
-#include "network_group_scheduler.hpp"
-
-namespace hailort
-{
-
-class NetworkGroupSchedulerOracle
-{
-public:
- static bool choose_next_model(NetworkGroupScheduler &scheduler, uint32_t device_id);
- static uint32_t get_avail_device(NetworkGroupScheduler &scheduler, scheduler_ng_handle_t network_group_handle);
-
-private:
- NetworkGroupSchedulerOracle() {}
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_SCHEDULER_ORACLE_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file sensor_config_utils.cpp
- * @brief Utilities for sensor_config operations
- **/
-
-#include "sensor_config_utils.hpp"
-#include "common/string_utils.hpp"
-#include "common/utils.hpp"
-
-#include <fstream>
-#include <sstream>
-#include <iomanip>
-
-namespace hailort
-{
-
-Expected<SENSOR_CONFIG_OPCODES_t> SensorConfigUtils::get_sensor_opcode_by_name(const std::string &name)
-{
- if (name == "SENSOR_CONFIG_OPCODES_WR") {
- return SENSOR_CONFIG_OPCODES_WR;
- }
- else if (name == "SENSOR_CONFIG_OPCODES_RD") {
- return SENSOR_CONFIG_OPCODES_RD;
- }
- else if (name == "SENSOR_CONFIG_OPCODES_RMW") {
- return SENSOR_CONFIG_OPCODES_RMW;
- }
- else if (name == "SENSOR_CONFIG_OPCODES_DELAY") {
- return SENSOR_CONFIG_OPCODES_DELAY;
- }
- else {
- LOGGER__ERROR("Failed getting opcode value by name: {}", name);
- return make_unexpected(HAILO_NOT_FOUND);
- }
-}
-
-Expected<std::string> SensorConfigUtils::convert_opcode_to_string(uint8_t opcode)
-{
- switch (opcode) {
- case SENSOR_CONFIG_OPCODES_WR:
- return std::string("SENSOR_CONFIG_OPCODES_WR");
-
- case SENSOR_CONFIG_OPCODES_RD:
- return std::string("SENSOR_CONFIG_OPCODES_RD");
-
- case SENSOR_CONFIG_OPCODES_RMW:
- return std::string("SENSOR_CONFIG_OPCODES_RMW");
-
- case SENSOR_CONFIG_OPCODES_DELAY:
- return std::string("SENSOR_CONFIG_OPCODES_DELAY");
-
- default:
- LOGGER__ERROR("Failed converting opcode to string");
- return make_unexpected(HAILO_NOT_FOUND);
- }
-}
-
-Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> SensorConfigUtils::read_config_file(const std::string &config_file_path)
-{
- std::ifstream config_file;
- config_file.open(config_file_path, std::ios::in);
- CHECK_AS_EXPECTED(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor config file with errno: {}", errno);
-
- std::vector<SENSOR_CONFIG__operation_cfg_t> control_buffers;
- std::string line;
- std::string col;
-
- while(std::getline(config_file, line)) {
- std::stringstream s(line);
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor config file with errno: {}", errno);
-
- SENSOR_CONFIG__operation_cfg_t config_entry = {};
-
- // opcode
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file opcode with errno: {}", errno);
- auto opcode = get_sensor_opcode_by_name(col);
- CHECK_EXPECTED(opcode, "Failed getting opcode value");
- config_entry.operation = static_cast<uint8_t>(opcode.value());
-
- // length
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file length with errno: {}", errno);
- auto length = StringUtils::to_uint8(col, 10);
- CHECK_EXPECTED(length);
- config_entry.length = length.value();
-
- // page
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file page with errno: {}", errno);
- auto page = StringUtils::to_int32(col, 16);
- CHECK_EXPECTED(page);
- if (0 > page.value()) {
- config_entry.page = 0xff;
- } else {
- auto page_uint8 = StringUtils::to_uint8(col, 16);
- CHECK_EXPECTED(page_uint8);
- config_entry.page = page_uint8.value();
- }
-
- // address
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file address with errno: {}", errno);
- auto address = StringUtils::to_uint32(col, 16);
- CHECK_EXPECTED(address);
- config_entry.address = address.value();
-
- // bitmask
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file bitmask with errno: {}", errno);
- auto bitmask = StringUtils::to_uint32(col, 16);
- CHECK_EXPECTED(bitmask);
- config_entry.bitmask = bitmask.value();
-
- // value
- std::getline(s, col, ',' );
- CHECK_AS_EXPECTED(!s.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file value with errno: {}", errno);
- auto value = StringUtils::to_uint32(col, 16);
- CHECK_EXPECTED(value);
- config_entry.value = value.value();
-
- control_buffers.emplace_back(config_entry);
- }
- CHECK_AS_EXPECTED(!config_file.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor config file with errno: {}", errno);
-
- return control_buffers;
-}
-
-Expected<SENSOR_CONFIG__operation_cfg_t> SensorConfigUtils::create_config_entry(uint8_t page, uint32_t address, uint8_t length, const std::string &hex_value)
-{
- auto config_entry_value = StringUtils::to_uint32(hex_value, 16);
- CHECK_EXPECTED(config_entry_value);
-
- SENSOR_CONFIG__operation_cfg_t config_entry = {};
- config_entry.value = config_entry_value.value();
- config_entry.operation = SENSOR_CONFIG_OPCODES_WR;
- config_entry.length = length;
- config_entry.page = page;
- config_entry.address = address;
- config_entry.bitmask = 0xFFFF;
-
- return config_entry;
-}
-
-Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> SensorConfigUtils::read_isp_config_file(const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path)
-{
- std::vector<std::string> config_files = {isp_static_config_file_path, isp_runtime_config_file_path};
- std::vector<SENSOR_CONFIG__operation_cfg_t> control_buffers;
-
- for (const auto &config_file_path : config_files) {
- std::ifstream config_file;
- config_file.open(config_file_path, std::ios::in);
- CHECK_AS_EXPECTED(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor ISP config file with errno: {}", errno);
-
- std::string line;
- uint8_t page = 0;
- uint32_t address = 0;
-
- while (std::getline(config_file, line)) {
- size_t comment_index = line.find("//");
- if (((std::string::npos != comment_index) && (0 == comment_index)) || ("\n" == line) ||
- ("\r\n" == line) || ("\r" == line) || ("" == line)) {
- continue;
- }
-
- std::string::iterator it = line.begin();
- CHECK_AS_EXPECTED(line.size() >= CONFIG_HEX_VALUE_LAST_CHAR_OFFSET, HAILO_INVALID_ARGUMENT, "Failed processing line {}. The line is not in the expected format. ", line);
- std::string prefix(it, it + CONFIG_PREFIX_LENGTH);
- std::string hex_value(it + CONFIG_PREFIX_LENGTH, it + CONFIG_HEX_VALUE_LAST_CHAR_OFFSET);
-
- // page
- if ("btp" == prefix) {
- auto page_expected = StringUtils::to_uint8(hex_value, 16);
- CHECK_EXPECTED(page_expected);
- page = page_expected.value();
- }
-
- // address
- else if ("bta" == prefix) {
- auto address_expected = StringUtils::to_uint32(hex_value, 16);
- CHECK_EXPECTED(address_expected);
- address = address_expected.value();
- }
-
- else if ("btb" == prefix) {
- auto config_entry = create_config_entry(page, address, 8, hex_value);
- CHECK_EXPECTED(config_entry);
-
- control_buffers.emplace_back(config_entry.release());
- address = address + 1;
- }
-
- else if ("bth" == prefix) {
- auto config_entry = create_config_entry(page, address, 16, hex_value);
- CHECK_EXPECTED(config_entry);
-
- control_buffers.emplace_back(config_entry.release());
- address = address + 2;
- }
-
- else if ("btw" == prefix) {
- auto config_entry = create_config_entry(page, address, 32, hex_value);
- CHECK_EXPECTED(config_entry);
-
- control_buffers.emplace_back(config_entry.release());
- address = address + 4;
- }
-
- else {
- LOGGER__ERROR("Invalid configuration prefix: {}", prefix);
- return make_unexpected(HAILO_NOT_FOUND);
- }
- }
- CHECK_AS_EXPECTED(!config_file.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor ISP config file with errno: {}", errno);
- }
-
- return control_buffers;
-}
-
-hailo_status SensorConfigUtils::dump_config_to_csv(SENSOR_CONFIG__operation_cfg_t *operation_cfg, const std::string &config_file_path, uint32_t entries_count)
-{
- std::ofstream config_file;
- config_file.open(config_file_path, std::ios::out);
- CHECK(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor config file with errno: {}", errno);
-
- for (size_t i = 0; i < entries_count; i++) {
- SENSOR_CONFIG__operation_cfg_t *config_entry = &operation_cfg[i];
-
- int page = (config_entry->page == 0xff) ? -1 : config_entry->page;
- int hex_width_filler = (config_entry->length == 8) ? 2 : 4;
- auto opcode_string = convert_opcode_to_string(config_entry->operation);
- CHECK_EXPECTED_AS_STATUS(opcode_string);
-
- // There is no need to restore flags since they only affect the fstream "config_file" and doens't affect std::cout or other files.
- config_file << std::dec << opcode_string.value() << "," << static_cast<uint32_t>(config_entry->length) << "," << page <<
- ",0x" << std::uppercase << std::hex << std::setfill('0') << std::setw(4) << config_entry->address <<
- ",0x" << std::setfill('0') << std::setw(hex_width_filler) << config_entry->bitmask <<
- ",0x" << std::setfill('0') << std::setw(hex_width_filler) << config_entry->value << std::endl;
- }
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file sensor_config_utils.hpp
- * @brief Utilities for sensor_config operations
- **/
-
-#ifndef _HAILO_SENSOR_CONFIG_UTILS_HPP_
-#define _HAILO_SENSOR_CONFIG_UTILS_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "control_protocol.h"
-
-#include <vector>
-#include <string>
-
-namespace hailort
-{
-
-#define MAX_CONFIG_INFO_ENTRIES (CONTROL_PROTOCOL__MAX_REQUEST_PAYLOAD_SIZE / sizeof(SENSOR_CONFIG__operation_cfg_t))
-#define MAX_CONFIG_ENTRIES_DATA_SIZE (MAX_CONFIG_INFO_ENTRIES * sizeof(SENSOR_CONFIG__operation_cfg_t))
-#define MAX_NON_ISP_SECTIONS (6)
-#define CONFIG_PREFIX_LENGTH (3)
-#define CONFIG_HEX_VALUE_LAST_CHAR_OFFSET (9)
-
-static_assert((MAX_CONFIG_INFO_ENTRIES > 0) ,"MAX_CONFIG_INFO_ENTRIES must be larger than 0");
-
-class SensorConfigUtils {
-public:
- static Expected<SENSOR_CONFIG_OPCODES_t> get_sensor_opcode_by_name(const std::string &name);
- static Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> read_config_file(const std::string &config_file_path);
- static Expected<SENSOR_CONFIG__operation_cfg_t> create_config_entry(uint8_t page, uint32_t address, uint8_t length, const std::string &hex_value);
- static Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> read_isp_config_file(const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path);
- static Expected<std::string> convert_opcode_to_string(uint8_t opcode);
- static hailo_status dump_config_to_csv(SENSOR_CONFIG__operation_cfg_t *operation_cfg, const std::string &config_file_path, uint32_t entries_count);
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_SENSOR_CONFIG_UTILS_HPP_ */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hailort_rpc_client.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/network_group_client.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_rpc_client.cpp
+ * @brief Implementation of the hailort rpc client
+ **/
+
+#include "common/utils.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "hailort_rpc_client.hpp"
+
+#include <grpcpp/health_check_service_interface.h>
+
+
+namespace hailort
+{
+
+hailo_status HailoRtRpcClient::client_keep_alive(uint32_t pid)
+{
+ keepalive_Request request;
+ request.set_pid(pid);
+ empty reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->client_keep_alive(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ return HAILO_SUCCESS;
+}
+
+Expected<hailo_version_t> HailoRtRpcClient::get_service_version()
+{
+ get_service_version_Request request;
+ get_service_version_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->get_service_version(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ auto version_proto = reply.hailo_version();
+ hailo_version_t service_version = {version_proto.major_version(), version_proto.minor_version(), version_proto.revision_version()};
+ return service_version;
+}
+
+Expected<uint32_t> HailoRtRpcClient::VDevice_create(const hailo_vdevice_params_t ¶ms, uint32_t pid) {
+ VDevice_create_Request request;
+ request.set_pid(pid);
+ auto proto_vdevice_params = request.mutable_hailo_vdevice_params();
+ proto_vdevice_params->set_device_count(params.device_count);
+ auto ids = proto_vdevice_params->mutable_device_ids();
+ if (params.device_ids != nullptr) {
+ for (size_t i = 0; i < params.device_count; ++i) {
+ ids->Add(std::string(params.device_ids[i].id));
+ }
+ }
+ proto_vdevice_params->set_scheduling_algorithm(params.scheduling_algorithm);
+ proto_vdevice_params->set_group_id(params.group_id == nullptr ? "" : std::string(params.group_id));
+
+ VDevice_create_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->VDevice_create(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return reply.handle();
+}
+
+Expected<uint32_t> HailoRtRpcClient::VDevice_dup_handle(uint32_t pid, uint32_t handle)
+{
+ dup_handle_Request request;
+ request.set_pid(pid);
+ request.set_handle(handle);
+ dup_handle_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->VDevice_dup_handle(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ return reply.handle();
+}
+
+hailo_status HailoRtRpcClient::VDevice_release(uint32_t handle)
+{
+ Release_Request request;
+ request.set_handle(handle);
+
+ Release_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->VDevice_release(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ return HAILO_SUCCESS;
+}
+
+Expected<std::vector<uint32_t>> HailoRtRpcClient::InputVStreams_create(uint32_t net_group_handle,
+ const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid)
+{
+ VStream_create_Request request;
+ request.set_net_group(net_group_handle);
+ request.set_pid(pid);
+ auto proto_vstreams_params = request.mutable_vstreams_params();
+ for (const auto &name_params_pair : inputs_params) {
+ ProtoNamedVStreamParams proto_name_param_pair;
+ auto vstream_params = name_params_pair.second;
+
+ proto_name_param_pair.set_name(name_params_pair.first);
+ auto proto_vstream_param = proto_name_param_pair.mutable_params();
+
+ auto proto_user_buffer_format = proto_vstream_param->mutable_user_buffer_format();
+ auto user_buffer_format = vstream_params.user_buffer_format;
+ proto_user_buffer_format->set_type(user_buffer_format.type);
+ proto_user_buffer_format->set_order(user_buffer_format.order);
+ proto_user_buffer_format->set_flags(user_buffer_format.flags);
+
+ proto_vstream_param->set_timeout_ms(vstream_params.timeout_ms);
+ proto_vstream_param->set_queue_size(vstream_params.queue_size);
+
+ proto_vstream_param->set_vstream_stats_flags(vstream_params.vstream_stats_flags);
+ proto_vstream_param->set_pipeline_elements_stats_flags(vstream_params.vstream_stats_flags);
+
+ proto_vstreams_params->Add(std::move(proto_name_param_pair));
+ }
+
+ VStreams_create_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->InputVStreams_create(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::vector<uint32_t> input_vstreams_handles;
+ input_vstreams_handles.reserve(reply.handles_size());
+ for (auto &handle : *reply.mutable_handles()) {
+ input_vstreams_handles.push_back(handle);
+ }
+ return input_vstreams_handles;
+}
+
+hailo_status HailoRtRpcClient::InputVStream_release(uint32_t handle)
+{
+ Release_Request request;
+ request.set_handle(handle);
+
+ Release_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->InputVStream_release(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ return HAILO_SUCCESS;
+}
+
+Expected<std::vector<uint32_t>> HailoRtRpcClient::OutputVStreams_create(uint32_t net_group_handle,
+ const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid)
+{
+ VStream_create_Request request;
+ request.set_net_group(net_group_handle);
+ request.set_pid(pid);
+ auto proto_vstreams_params = request.mutable_vstreams_params();
+ for (const auto &name_params_pair : output_params) {
+ ProtoNamedVStreamParams proto_name_param_pair;
+ auto vstream_params = name_params_pair.second;
+
+ proto_name_param_pair.set_name(name_params_pair.first);
+ auto proto_vstream_param = proto_name_param_pair.mutable_params();
+
+ auto proto_user_buffer_format = proto_vstream_param->mutable_user_buffer_format();
+ auto user_buffer_format = vstream_params.user_buffer_format;
+ proto_user_buffer_format->set_type(user_buffer_format.type);
+ proto_user_buffer_format->set_order(user_buffer_format.order);
+ proto_user_buffer_format->set_flags(user_buffer_format.flags);
+
+ proto_vstream_param->set_timeout_ms(vstream_params.timeout_ms);
+ proto_vstream_param->set_queue_size(vstream_params.queue_size);
+
+ proto_vstream_param->set_vstream_stats_flags(vstream_params.vstream_stats_flags);
+ proto_vstream_param->set_pipeline_elements_stats_flags(vstream_params.vstream_stats_flags);
+
+ proto_vstreams_params->Add(std::move(proto_name_param_pair));
+ }
+
+ VStreams_create_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->OutputVStreams_create(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::vector<uint32_t> output_vstreams_handles;
+ output_vstreams_handles.reserve(reply.handles_size());
+ for (auto &handle : *reply.mutable_handles()) {
+ output_vstreams_handles.push_back(handle);
+ }
+ return output_vstreams_handles;
+}
+
+hailo_status HailoRtRpcClient::OutputVStream_release(uint32_t handle)
+{
+ Release_Request request;
+ request.set_handle(handle);
+
+ Release_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->OutputVStream_release(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ return HAILO_SUCCESS;
+}
+
+Expected<uint32_t> HailoRtRpcClient::InputVStream_dup_handle(uint32_t pid, uint32_t handle)
+{
+ dup_handle_Request request;
+ request.set_pid(pid);
+ request.set_handle(handle);
+ dup_handle_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->InputVStream_dup_handle(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ return reply.handle();
+}
+
+Expected<uint32_t> HailoRtRpcClient::OutputVStream_dup_handle(uint32_t pid, uint32_t handle)
+{
+ dup_handle_Request request;
+ request.set_pid(pid);
+ request.set_handle(handle);
+ dup_handle_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->OutputVStream_dup_handle(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ return reply.handle();
+}
+
/**
 * Configures network groups from the given HEF on the service-side VDevice.
 *
 * Serializes the raw HEF bytes plus the per-network-group configure params
 * (batch size, power mode, latency flags, per-stream and per-network params)
 * into the request, then issues the VDevice_configure RPC.
 *
 * @param vdevice_handle    Service handle of the VDevice to configure.
 * @param hef               HEF whose memory view is sent to the service.
 * @param pid               Client process id (used by the service for resource tracking).
 * @param configure_params  Map of network-group name to its configure params.
 * @return Service handles of the configured network groups, or a failure status.
 */
Expected<std::vector<uint32_t>> HailoRtRpcClient::VDevice_configure(uint32_t vdevice_handle, const Hef &hef,
    uint32_t pid, const NetworkGroupsParamsMap &configure_params)
{
    VDevice_configure_Request request;
    request.set_handle(vdevice_handle);
    request.set_pid(pid);
    // Send the entire HEF buffer to the service; it re-parses it there.
    auto hef_memview = hef.pimpl->get_hef_memview();
    request.set_hef(hef_memview.data(), hef_memview.size());

    // Serialize NetworkGroupsParamsMap
    for (const auto &name_params_pair : configure_params) {
        auto proto_net_params = request.add_configure_params_map();
        proto_net_params->set_name(name_params_pair.first);

        auto net_configure_params = name_params_pair.second;
        auto proto_network_configure_params = proto_net_params->mutable_params();
        proto_network_configure_params->set_batch_size(net_configure_params.batch_size);
        proto_network_configure_params->set_power_mode(net_configure_params.power_mode);
        proto_network_configure_params->set_latency(net_configure_params.latency);

        // Init stream params map
        for (const auto &name_stream_params_pair : net_configure_params.stream_params_by_name) {
            auto proto_name_streams_params = proto_network_configure_params->add_stream_params_map();
            proto_name_streams_params->set_name(name_stream_params_pair.first);

            auto proto_stream_params = proto_name_streams_params->mutable_params();
            auto stream_params = name_stream_params_pair.second;
            proto_stream_params->set_stream_interface(stream_params.stream_interface);
            proto_stream_params->set_direction(stream_params.direction);
            proto_stream_params->set_flags(stream_params.flags);
        }

        // Init network params map
        for (const auto &name_network_params_pair : net_configure_params.network_params_by_name) {
            auto proto_name_network_params = proto_network_configure_params->add_network_params_map();
            proto_name_network_params->set_name(name_network_params_pair.first);

            auto proto_network_params = proto_name_network_params->mutable_params();
            auto network_params = name_network_params_pair.second;
            proto_network_params->set_batch_size(network_params.batch_size);
        }
    }

    VDevice_configure_Reply reply;
    grpc::ClientContext context;
    grpc::Status status = m_stub->VDevice_configure(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));

    std::vector<uint32_t> networks_handles(reply.networks_handles().begin(), reply.networks_handles().end());
    return networks_handles;
}
+
+Expected<std::vector<std::string>> HailoRtRpcClient::VDevice_get_physical_devices_ids(uint32_t handle)
+{
+ VDevice_get_physical_devices_ids_Request request;
+ request.set_handle(handle);
+
+ VDevice_get_physical_devices_ids_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->VDevice_get_physical_devices_ids(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::vector<std::string> result;
+ for (auto &device_id_proto : reply.devices_ids()) {
+ result.push_back(device_id_proto);
+ }
+ return result;
+}
+
+Expected<hailo_stream_interface_t> HailoRtRpcClient::VDevice_get_default_streams_interface(uint32_t handle)
+{
+ VDevice_get_default_streams_interface_Request request;
+ request.set_handle(handle);
+
+ VDevice_get_default_streams_interface_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->VDevice_get_default_streams_interface(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ CHECK_AS_EXPECTED(reply.stream_interface() < HAILO_STREAM_INTERFACE_MAX_ENUM, HAILO_INTERNAL_FAILURE,
+ "stream_interface {} out of range", reply.stream_interface());
+ return static_cast<hailo_stream_interface_t>(reply.stream_interface());
+}
+
+Expected<uint32_t> HailoRtRpcClient::ConfiguredNetworkGroup_dup_handle(uint32_t pid, uint32_t handle)
+{
+ dup_handle_Request request;
+ request.set_pid(pid);
+ request.set_handle(handle);
+ dup_handle_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_dup_handle(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ return reply.handle();
+}
+
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_release(uint32_t handle)
+{
+ Release_Request request;
+ request.set_handle(handle);
+
+ Release_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_release(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ return HAILO_SUCCESS;
+}
+
+std::map<std::string, hailo_vstream_params_t> get_group(const ProtoNamedVStreamParamsMap &named_params_map)
+{
+ std::map<std::string, hailo_vstream_params_t> result;
+ for (auto &named_params : named_params_map.vstream_params_map()) {
+ auto name = named_params.name();
+ auto proto_params = named_params.params();
+ auto proto_user_buffer_format = proto_params.user_buffer_format();
+ hailo_format_t user_buffer_format = {
+ static_cast<hailo_format_type_t>(proto_user_buffer_format.type()),
+ static_cast<hailo_format_order_t>(proto_user_buffer_format.order()),
+ static_cast<hailo_format_flags_t>(proto_user_buffer_format.flags())
+ };
+ hailo_vstream_params_t params = {
+ user_buffer_format,
+ proto_params.timeout_ms(),
+ proto_params.queue_size(),
+ static_cast<hailo_vstream_stats_flags_t>(proto_params.vstream_stats_flags()),
+ static_cast<hailo_pipeline_elem_stats_flags_t>(proto_params.pipeline_elements_stats_flags())
+ };
+ result.insert({name, params});
+ }
+ return result;
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_input_vstream_params(
+ uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_make_input_vstream_params_Request request;
+ request.set_handle(handle);
+ request.set_quantized(quantized);
+ request.set_format_type(format_type);
+ request.set_timeout_ms(timeout_ms);
+ request.set_queue_size(queue_size);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_make_input_vstream_params_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_make_input_vstream_params(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return get_group(reply.vstream_params_map());
+}
+
+Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params_groups(
+ uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+ ConfiguredNetworkGroup_make_output_vstream_params_groups_Request request;
+ request.set_handle(handle);
+ request.set_quantized(quantized);
+ request.set_format_type(format_type);
+ request.set_timeout_ms(timeout_ms);
+ request.set_queue_size(queue_size);
+
+ ConfiguredNetworkGroup_make_output_vstream_params_groups_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_make_output_vstream_params_groups(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::vector<std::map<std::string, hailo_vstream_params_t>> result;
+ for (auto &map_proto : reply.vstream_params_groups()) {
+ auto group = get_group(map_proto);
+ result.push_back(group);
+ }
+ return result;
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params(
+ uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_make_output_vstream_params_Request request;
+ request.set_handle(handle);
+ request.set_quantized(quantized);
+ request.set_format_type(format_type);
+ request.set_timeout_ms(timeout_ms);
+ request.set_queue_size(queue_size);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_make_output_vstream_params_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_make_output_vstream_params(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::map<std::string, hailo_vstream_params_t> result;
+ for (int i = 0; i < reply.vstream_params_map().vstream_params_map_size(); ++i) {
+ auto name = reply.vstream_params_map().vstream_params_map(i).name();
+ auto proto_params = reply.vstream_params_map().vstream_params_map(i).params();
+ auto proto_user_buffer_format = proto_params.user_buffer_format();
+ hailo_format_t user_buffer_format = {
+ static_cast<hailo_format_type_t>(proto_user_buffer_format.type()),
+ static_cast<hailo_format_order_t>(proto_user_buffer_format.order()),
+ static_cast<hailo_format_flags_t>(proto_user_buffer_format.flags())
+ };
+ hailo_vstream_params_t params = {
+ user_buffer_format,
+ proto_params.timeout_ms(),
+ proto_params.queue_size(),
+ static_cast<hailo_vstream_stats_flags_t>(proto_params.vstream_stats_flags()),
+ static_cast<hailo_pipeline_elem_stats_flags_t>(proto_params.pipeline_elements_stats_flags())
+ };
+ result.insert({name, params});
+ }
+ return result;
+}
+
// Alias kept for API compatibility; simply delegates to ConfiguredNetworkGroup_name().
Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_group_name(uint32_t handle)
{
    return ConfiguredNetworkGroup_name(handle);
}
+
+Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_name(uint32_t handle)
+{
+ ConfiguredNetworkGroup_name_Request request;
+ request.set_handle(handle);
+
+ ConfiguredNetworkGroup_name_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_name(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ auto network_group_name = reply.network_group_name();
+ return network_group_name;
+}
+
+Expected<std::vector<hailo_network_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_infos(uint32_t handle)
+{
+ ConfiguredNetworkGroup_get_network_infos_Request request;
+ request.set_handle(handle);
+
+ ConfiguredNetworkGroup_get_network_infos_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_network_infos(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ auto network_infos_proto = reply.network_infos();
+ std::vector<hailo_network_info_t> network_infos;
+ network_infos.reserve(network_infos_proto.size());
+ for (auto& info_proto : network_infos_proto) {
+ hailo_network_info_t info;
+ strcpy(info.name, info_proto.c_str());
+ network_infos.push_back(info);
+ }
+ return network_infos;
+}
+
+Expected<std::vector<hailo_stream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_get_all_stream_infos_Request request;
+ request.set_handle(handle);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_get_all_stream_infos_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_all_stream_infos(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ std::vector<hailo_stream_info_t> result;
+ result.reserve(reply.stream_infos().size());
+ for (auto proto_stream_info : reply.stream_infos()) {
+ hailo_3d_image_shape_t shape{
+ proto_stream_info.stream_shape().shape().height(),
+ proto_stream_info.stream_shape().shape().width(),
+ proto_stream_info.stream_shape().shape().features(),
+ };
+ hailo_3d_image_shape_t hw_shape{
+ proto_stream_info.stream_shape().hw_shape().height(),
+ proto_stream_info.stream_shape().hw_shape().width(),
+ proto_stream_info.stream_shape().hw_shape().features(),
+ };
+ hailo_nms_defuse_info_t nms_defuse_info{
+ proto_stream_info.nms_info().defuse_info().class_group_index(),
+ {0}
+ };
+ strcpy(nms_defuse_info.original_name, proto_stream_info.nms_info().defuse_info().original_name().c_str());
+ hailo_nms_info_t nms_info{
+ proto_stream_info.nms_info().number_of_classes(),
+ proto_stream_info.nms_info().max_bboxes_per_class(),
+ proto_stream_info.nms_info().bbox_size(),
+ proto_stream_info.nms_info().chunks_per_frame(),
+ proto_stream_info.nms_info().is_defused(),
+ nms_defuse_info,
+ };
+ hailo_format_t format{
+ static_cast<hailo_format_type_t>(proto_stream_info.format().type()),
+ static_cast<hailo_format_order_t>(proto_stream_info.format().order()),
+ static_cast<hailo_format_flags_t>(proto_stream_info.format().flags())
+ };
+ hailo_quant_info_t quant_info{
+ proto_stream_info.quant_info().qp_zp(),
+ proto_stream_info.quant_info().qp_scale(),
+ proto_stream_info.quant_info().limvals_min(),
+ proto_stream_info.quant_info().limvals_max()
+ };
+ hailo_stream_info_t stream_info;
+ if (format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
+ stream_info.nms_info = nms_info;
+ } else {
+ stream_info.shape = shape;
+ stream_info.hw_shape = hw_shape;
+ }
+ stream_info.hw_data_bytes = proto_stream_info.hw_data_bytes();
+ stream_info.hw_frame_size = proto_stream_info.hw_frame_size();
+ stream_info.format = format;
+ stream_info.direction = static_cast<hailo_stream_direction_t>(proto_stream_info.direction());
+ stream_info.index = static_cast<uint8_t>(proto_stream_info.index());
+ strcpy(stream_info.name, proto_stream_info.name().c_str());
+ stream_info.quant_info = quant_info;
+ stream_info.is_mux = proto_stream_info.is_mux();
+ result.push_back(stream_info);
+ }
+ return result;
+}
+
+Expected<hailo_stream_interface_t> HailoRtRpcClient::ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle)
+{
+ ConfiguredNetworkGroup_get_default_stream_interface_Request request;
+ request.set_handle(handle);
+
+ ConfiguredNetworkGroup_get_default_stream_interface_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_default_stream_interface(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ auto stream_interface = static_cast<hailo_stream_interface_t>(reply.stream_interface());
+ return stream_interface;
+}
+
+Expected<std::vector<std::vector<std::string>>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle)
+{
+ ConfiguredNetworkGroup_get_output_vstream_groups_Request request;
+ request.set_handle(handle);
+
+ ConfiguredNetworkGroup_get_output_vstream_groups_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_output_vstream_groups(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ auto vstream_groups_proto = reply.output_vstream_groups();
+ std::vector<std::vector<std::string>> result;
+ result.reserve(vstream_groups_proto.size());
+ for (auto& vstream_group_proto : vstream_groups_proto) {
+ std::vector<std::string> group;
+ group.reserve(vstream_group_proto.vstream_group().size());
+ for (auto& name : vstream_group_proto.vstream_group()) {
+ group.push_back(name);
+ }
+ result.push_back(group);
+ }
+ return result;
+}
+
+hailo_vstream_info_t deserialize_vstream_info(const ProtoVStreamInfo &info_proto)
+{
+ hailo_vstream_info_t info;
+ strcpy(info.name, info_proto.name().c_str());
+ strcpy(info.network_name, info_proto.network_name().c_str());
+ info.direction = static_cast<hailo_stream_direction_t>(info_proto.direction());
+ hailo_format_t format = {
+ static_cast<hailo_format_type_t>(info_proto.format().type()),
+ static_cast<hailo_format_order_t>(info_proto.format().order()),
+ static_cast<hailo_format_flags_t>(info_proto.format().flags())
+ };
+ info.format = format;
+ if (format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
+ hailo_nms_shape_t nms_shape = {
+ info_proto.nms_shape().number_of_classes(),
+ info_proto.nms_shape().max_bbox_per_class()
+ };
+ info.nms_shape = nms_shape;
+ } else {
+ hailo_3d_image_shape_t shape = {
+ info_proto.shape().height(),
+ info_proto.shape().width(),
+ info_proto.shape().features()
+ };
+ info.shape = shape;
+ }
+ hailo_quant_info_t quant_info = {
+ info_proto.quant_info().qp_zp(),
+ info_proto.quant_info().qp_scale(),
+ info_proto.quant_info().limvals_min(),
+ info_proto.quant_info().limvals_max()
+ };
+ info.quant_info = quant_info;
+ return info;
+}
+
+Expected<std::vector<hailo_vstream_info_t>> deserialize_vstream_infos(const ConfiguredNetworkGroup_get_vstream_infos_Reply &reply)
+{
+ std::vector<hailo_vstream_info_t> result;
+ result.reserve(reply.vstream_infos().size());
+ for (auto& info_proto : reply.vstream_infos()) {
+ auto info = deserialize_vstream_info(info_proto);
+ result.push_back(info);
+ }
+ return result;
+}
+
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle,
+ std::string network_name)
+{
+ ConfiguredNetworkGroup_get_vstream_infos_Request request;
+ request.set_handle(handle);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_input_vstream_infos(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return deserialize_vstream_infos(reply);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle,
+ std::string network_name)
+{
+ ConfiguredNetworkGroup_get_vstream_infos_Request request;
+ request.set_handle(handle);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_output_vstream_infos(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return deserialize_vstream_infos(reply);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle,
+ std::string network_name)
+{
+ ConfiguredNetworkGroup_get_vstream_infos_Request request;
+ request.set_handle(handle);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_all_vstream_infos(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return deserialize_vstream_infos(reply);
+}
+
+Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_scheduled(uint32_t handle)
+{
+ ConfiguredNetworkGroup_is_scheduled_Request request;
+ ConfiguredNetworkGroup_is_scheduled_Reply reply;
+ request.set_handle(handle);
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_is_scheduled(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return reply.is_scheduled();
+}
+
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle,
+ const std::chrono::milliseconds &timeout, const std::string &network_name)
+{
+ ConfiguredNetworkGroup_set_scheduler_timeout_Request request;
+ request.set_handle(handle);
+ request.set_timeout_ms(static_cast<uint32_t>(timeout.count()));
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_set_scheduler_timeout_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_set_scheduler_timeout(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_set_scheduler_threshold_Request request;
+ request.set_handle(handle);
+ request.set_threshold(threshold);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_set_scheduler_threshold_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_set_scheduler_threshold(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_priority(uint32_t handle, uint8_t priority,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_set_scheduler_priority_Request request;
+ request.set_handle(handle);
+ request.set_priority(priority);
+ request.set_network_name(network_name);
+
+ ConfiguredNetworkGroup_set_scheduler_priority_Reply reply;
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_set_scheduler_priority(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ return static_cast<hailo_status>(reply.status());
+}
+
+Expected<LatencyMeasurementResult> HailoRtRpcClient::ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle,
+ const std::string &network_name)
+{
+ ConfiguredNetworkGroup_get_latency_measurement_Request request;
+ ConfiguredNetworkGroup_get_latency_measurement_Reply reply;
+ request.set_handle(handle);
+ request.set_network_name(network_name);
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_get_latency_measurement(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ LatencyMeasurementResult result{
+ std::chrono::nanoseconds(reply.avg_hw_latency())
+ };
+ return result;
+}
+
+Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_multi_context(uint32_t handle)
+{
+ ConfiguredNetworkGroup_is_multi_context_Request request;
+ ConfiguredNetworkGroup_is_multi_context_Reply reply;
+ request.set_handle(handle);
+ grpc::ClientContext context;
+ grpc::Status status = m_stub->ConfiguredNetworkGroup_is_multi_context(&context, request, &reply);
+ CHECK_GRPC_STATUS_AS_EXPECTED(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+ return reply.is_multi_context();
+}
+
/**
 * Fetches the configure params the network group 'handle' was configured with
 * and deserializes them back into a ConfigureNetworkParams, including the
 * per-stream and per-network sub-maps.
 *
 * @param handle  Service handle of the configured network group.
 * @return The reconstructed ConfigureNetworkParams, or a failure status.
 */
Expected<ConfigureNetworkParams> HailoRtRpcClient::ConfiguredNetworkGroup_get_config_params(uint32_t handle)
{
    ConfiguredNetworkGroup_get_config_params_Request request;
    ConfiguredNetworkGroup_get_config_params_Reply reply;
    request.set_handle(handle);
    grpc::ClientContext context;
    grpc::Status status = m_stub->ConfiguredNetworkGroup_get_config_params(&context, request, &reply);
    CHECK_GRPC_STATUS_AS_EXPECTED(status);
    assert(reply.status() < HAILO_STATUS_COUNT);
    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
    auto proto_configure_params = reply.params();
    ConfigureNetworkParams network_configure_params{};
    network_configure_params.batch_size = static_cast<uint16_t>(proto_configure_params.batch_size());
    network_configure_params.power_mode = static_cast<hailo_power_mode_t>(proto_configure_params.power_mode());
    network_configure_params.latency = static_cast<hailo_latency_measurement_flags_t>(proto_configure_params.latency());
    // Rebuild the per-stream params map.
    for (auto &proto_name_streams_params_pair : proto_configure_params.stream_params_map()) {
        auto proto_streams_params = proto_name_streams_params_pair.params();
        auto stream_direction = static_cast<hailo_stream_direction_t>(proto_streams_params.direction());
        hailo_stream_parameters_t stream_params{};
        stream_params.stream_interface = static_cast<hailo_stream_interface_t>(proto_streams_params.stream_interface());
        stream_params.direction = stream_direction;
        stream_params.flags = static_cast<hailo_stream_flags_t>(proto_streams_params.flags());
        // Only one member of the params union is meaningful; pick it by direction
        // and zero it (the proto does not carry the interface-specific fields).
        if (stream_direction == HAILO_H2D_STREAM) {
            stream_params.pcie_input_params = {0};
        } else {
            stream_params.pcie_output_params = {0};
        }
        network_configure_params.stream_params_by_name.insert({proto_name_streams_params_pair.name(), stream_params});
    }
    // Rebuild the per-network params map.
    for (auto &proto_name_network_params_pair : proto_configure_params.network_params_map()) {
        auto proto_network_params = proto_name_network_params_pair.params();
        hailo_network_parameters_t net_params {
            static_cast<uint16_t>(proto_network_params.batch_size())
        };

        network_configure_params.network_params_by_name.insert({proto_name_network_params_pair.name(), net_params});
    }
    return network_configure_params;
}
+
+hailo_status HailoRtRpcClient::InputVStream_write(uint32_t handle, const MemoryView &buffer)
+{
+ InputVStream_write_Request request;
+ request.set_handle(handle);
+ request.set_data(buffer.data(), buffer.size());
+ grpc::ClientContext context;
+ InputVStream_write_Reply reply;
+ grpc::Status status = m_stub->InputVStream_write(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ if (reply.status() == HAILO_STREAM_ABORTED_BY_USER) {
+ return static_cast<hailo_status>(reply.status());
+ }
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ return HAILO_SUCCESS;
+}
+
+hailo_status HailoRtRpcClient::OutputVStream_read(uint32_t handle, MemoryView buffer)
+{
+ OutputVStream_read_Request request;
+ request.set_handle(handle);
+ request.set_size(static_cast<uint32_t>(buffer.size()));
+ grpc::ClientContext context;
+ OutputVStream_read_Reply reply;
+ grpc::Status status = m_stub->OutputVStream_read(&context, request, &reply);
+ CHECK_GRPC_STATUS(status);
+ assert(reply.status() < HAILO_STATUS_COUNT);
+ if (reply.status() == HAILO_STREAM_ABORTED_BY_USER) {
+ return static_cast<hailo_status>(reply.status());
+ }
+ CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+ memcpy(buffer.data(), reply.data().data(), buffer.size());
+ return HAILO_SUCCESS;
+}
+
+// Queries the frame size (in bytes) of the service-side input vstream.
+Expected<size_t> HailoRtRpcClient::InputVStream_get_frame_size(uint32_t handle)
+{
+    VStream_get_frame_size_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_frame_size_Reply reply;
+    grpc::Status status = m_stub->InputVStream_get_frame_size(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    return reply.frame_size();
+}
+
+// Queries the frame size (in bytes) of the service-side output vstream.
+Expected<size_t> HailoRtRpcClient::OutputVStream_get_frame_size(uint32_t handle)
+{
+    VStream_get_frame_size_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_frame_size_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_get_frame_size(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    return reply.frame_size();
+}
+
+// Flushes pending frames of the service-side input vstream.
+// Note: the service status is returned as-is (no CHECK_SUCCESS), so failures here
+// are not logged by this layer — the caller is expected to handle/log them.
+hailo_status HailoRtRpcClient::InputVStream_flush(uint32_t handle)
+{
+    InputVStream_flush_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    InputVStream_flush_Reply reply;
+    grpc::Status status = m_stub->InputVStream_flush(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+// Fetches the name of the service-side input vstream.
+Expected<std::string> HailoRtRpcClient::InputVStream_name(uint32_t handle)
+{
+    VStream_name_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_name_Reply reply;
+    grpc::Status status = m_stub->InputVStream_name(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    // Local copy so the Expected owns the string independently of the reply message.
+    auto name = reply.name();
+    return name;
+}
+
+// Fetches the name of the service-side output vstream.
+Expected<std::string> HailoRtRpcClient::OutputVStream_name(uint32_t handle)
+{
+    VStream_name_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_name_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_name(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto name = reply.name();
+    return name;
+}
+
+// Fetches the network name the service-side input vstream belongs to.
+Expected<std::string> HailoRtRpcClient::InputVStream_network_name(uint32_t handle)
+{
+    VStream_network_name_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_network_name_Reply reply;
+    grpc::Status status = m_stub->InputVStream_network_name(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto name = reply.network_name();
+    return name;
+}
+
+// Fetches the network name the service-side output vstream belongs to.
+Expected<std::string> HailoRtRpcClient::OutputVStream_network_name(uint32_t handle)
+{
+    VStream_network_name_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_network_name_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_network_name(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto name = reply.network_name();
+    return name;
+}
+
+// Abort/resume controls for the service-side vstreams. All four return the service
+// status as-is (no CHECK_SUCCESS), so "failure" here is not logged by this layer —
+// abort/resume results are routinely inspected by the caller.
+hailo_status HailoRtRpcClient::InputVStream_abort(uint32_t handle)
+{
+    VStream_abort_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_abort_Reply reply;
+    grpc::Status status = m_stub->InputVStream_abort(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::OutputVStream_abort(uint32_t handle)
+{
+    VStream_abort_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_abort_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_abort(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::InputVStream_resume(uint32_t handle)
+{
+    VStream_resume_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_resume_Reply reply;
+    grpc::Status status = m_stub->InputVStream_resume(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::OutputVStream_resume(uint32_t handle)
+{
+    VStream_resume_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_resume_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_resume(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+// Fetches the user buffer format of the service-side input vstream.
+// The proto enum fields are numeric and are cast back to the public hailo_format_t
+// enums; both sides are expected to share the same enum values.
+Expected<hailo_format_t> HailoRtRpcClient::InputVStream_get_user_buffer_format(uint32_t handle)
+{
+    VStream_get_user_buffer_format_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_user_buffer_format_Reply reply;
+    grpc::Status status = m_stub->InputVStream_get_user_buffer_format(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+
+    auto user_buffer_format_proto = reply.user_buffer_format();
+    hailo_format_t format{
+        static_cast<hailo_format_type_t>(user_buffer_format_proto.type()),
+        static_cast<hailo_format_order_t>(user_buffer_format_proto.order()),
+        static_cast<hailo_format_flags_t>(user_buffer_format_proto.flags())
+    };
+
+    return format;
+}
+
+// Fetches the user buffer format of the service-side output vstream.
+Expected<hailo_format_t> HailoRtRpcClient::OutputVStream_get_user_buffer_format(uint32_t handle)
+{
+    VStream_get_user_buffer_format_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_user_buffer_format_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_get_user_buffer_format(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+
+    auto user_buffer_format_proto = reply.user_buffer_format();
+    hailo_format_t format{
+        static_cast<hailo_format_type_t>(user_buffer_format_proto.type()),
+        static_cast<hailo_format_order_t>(user_buffer_format_proto.order()),
+        static_cast<hailo_format_flags_t>(user_buffer_format_proto.flags())
+    };
+
+    return format;
+}
+
+// Fetches the full vstream info of the service-side input vstream and converts the
+// proto message to hailo_vstream_info_t via deserialize_vstream_info.
+Expected<hailo_vstream_info_t> HailoRtRpcClient::InputVStream_get_info(uint32_t handle)
+{
+    VStream_get_info_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_info_Reply reply;
+    grpc::Status status = m_stub->InputVStream_get_info(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto info_proto = reply.vstream_info();
+    return deserialize_vstream_info(info_proto);
+}
+// Same as above, for the service-side output vstream.
+Expected<hailo_vstream_info_t> HailoRtRpcClient::OutputVStream_get_info(uint32_t handle)
+{
+    VStream_get_info_Request request;
+    request.set_handle(handle);
+    grpc::ClientContext context;
+    VStream_get_info_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_get_info(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto info_proto = reply.vstream_info();
+    return deserialize_vstream_info(info_proto);
+}
+
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_rpc_client.hpp
+ * @brief Client-side wrapper over the HailoRT gRPC service: one method per RPC,
+ *        translating proto replies into hailo_status / Expected<> values.
+ **/
+
+#ifndef HAILO_HAILORT_RPC_CLIENT_HPP_
+#define HAILO_HAILORT_RPC_CLIENT_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4244 4267 4127)
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#include <grpcpp/grpcpp.h>
+#include "hailort_rpc.grpc.pb.h"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
+#include <memory>
+
+
+namespace hailort
+{
+
+// Thin client over the HailoRT multi-process service. Each method mirrors one RPC
+// of ProtoHailoRtRpc; objects on the service side are addressed by uint32_t handles.
+// The class owns only the gRPC stub; it is as thread-safe as the stub itself.
+class HailoRtRpcClient final {
+public:
+    // The channel is shared with the caller; only the generated stub is kept.
+    HailoRtRpcClient(std::shared_ptr<grpc::Channel> channel)
+        : m_stub(ProtoHailoRtRpc::NewStub(channel)) {}
+
+    // Liveness ping identifying this client process to the service.
+    hailo_status client_keep_alive(uint32_t pid);
+    Expected<hailo_version_t> get_service_version();
+
+    // VDevice lifecycle and queries (handle-based).
+    Expected<uint32_t> VDevice_create(const hailo_vdevice_params_t &params, uint32_t pid);
+    Expected<uint32_t> VDevice_dup_handle(uint32_t pid, uint32_t handle);
+    hailo_status VDevice_release(uint32_t handle);
+    Expected<std::vector<std::string>> VDevice_get_physical_devices_ids(uint32_t handle);
+    Expected<hailo_stream_interface_t> VDevice_get_default_streams_interface(uint32_t handle);
+    Expected<std::vector<uint32_t>> VDevice_configure(uint32_t vdevice_handle, const Hef &hef, uint32_t pid, const NetworkGroupsParamsMap &configure_params={});
+
+    // Configured network group operations (handle-based).
+    Expected<uint32_t> ConfiguredNetworkGroup_dup_handle(uint32_t pid, uint32_t handle);
+    hailo_status ConfiguredNetworkGroup_release(uint32_t handle);
+    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_input_vstream_params(uint32_t handle,
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name);
+    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_output_vstream_params(uint32_t handle,
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+        const std::string &network_name);
+    Expected<std::string> ConfiguredNetworkGroup_get_network_group_name(uint32_t handle);
+    Expected<std::string> ConfiguredNetworkGroup_name(uint32_t handle);
+    Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroup_get_network_infos(uint32_t handle);
+    Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle, const std::string &network_name);
+    Expected<hailo_stream_interface_t> ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle);
+    Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroup_make_output_vstream_params_groups(uint32_t handle,
+        bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
+    Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle, std::string network_name);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle, std::string network_name);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle, std::string network_name);
+    Expected<bool> ConfiguredNetworkGroup_is_scheduled(uint32_t handle);
+    hailo_status ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle, const std::chrono::milliseconds &timeout,
+        const std::string &network_name);
+    hailo_status ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold, const std::string &network_name);
+    hailo_status ConfiguredNetworkGroup_set_scheduler_priority(uint32_t handle, uint8_t priority, const std::string &network_name);
+    Expected<LatencyMeasurementResult> ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle, const std::string &network_name);
+    Expected<bool> ConfiguredNetworkGroup_is_multi_context(uint32_t handle);
+    Expected<ConfigureNetworkParams> ConfiguredNetworkGroup_get_config_params(uint32_t handle);
+
+    // VStream creation, data path and queries (handle-based).
+    Expected<std::vector<uint32_t>> InputVStreams_create(uint32_t net_group_handle,
+        const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid);
+    Expected<uint32_t> InputVStream_dup_handle(uint32_t pid, uint32_t handle);
+    Expected<uint32_t> OutputVStream_dup_handle(uint32_t pid, uint32_t handle);
+    hailo_status InputVStream_release(uint32_t handle);
+    Expected<std::vector<uint32_t>> OutputVStreams_create(uint32_t net_group_handle,
+        const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid);
+    hailo_status OutputVStream_release(uint32_t handle);
+    hailo_status InputVStream_write(uint32_t handle, const MemoryView &buffer);
+    hailo_status OutputVStream_read(uint32_t handle, MemoryView buffer);
+    Expected<size_t> InputVStream_get_frame_size(uint32_t handle);
+    Expected<size_t> OutputVStream_get_frame_size(uint32_t handle);
+
+    hailo_status InputVStream_flush(uint32_t handle);
+
+    Expected<std::string> InputVStream_name(uint32_t handle);
+    Expected<std::string> OutputVStream_name(uint32_t handle);
+
+    Expected<std::string> InputVStream_network_name(uint32_t handle);
+    Expected<std::string> OutputVStream_network_name(uint32_t handle);
+
+    hailo_status InputVStream_abort(uint32_t handle);
+    hailo_status OutputVStream_abort(uint32_t handle);
+    hailo_status InputVStream_resume(uint32_t handle);
+    hailo_status OutputVStream_resume(uint32_t handle);
+
+    Expected<hailo_format_t> InputVStream_get_user_buffer_format(uint32_t handle);
+    Expected<hailo_format_t> OutputVStream_get_user_buffer_format(uint32_t handle);
+
+    Expected<hailo_vstream_info_t> InputVStream_get_info(uint32_t handle);
+    Expected<hailo_vstream_info_t> OutputVStream_get_info(uint32_t handle);
+
+private:
+    // Generated gRPC stub; sole owned resource (Rule of Zero otherwise).
+    std::unique_ptr<ProtoHailoRtRpc::Stub> m_stub;
+};
+
+}
+
+#endif // HAILO_HAILORT_RPC_CLIENT_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file network_group_client.cpp
+ * @brief Network group client object — ConfiguredNetworkGroup implementation that
+ *        forwards operations to the HailoRT service over the RPC client.
+ **/
+
+#include "hailo/vstream.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/utils.hpp"
+#include "common/os_utils.hpp"
+
+#include "network_group/network_group_internal.hpp"
+#include "net_flow/pipeline/vstream_internal.hpp"
+#include "rpc/rpc_definitions.hpp"
+#include "rpc_client_utils.hpp"
+
+
+namespace hailort
+{
+
+// Takes ownership of the RPC client and caches the network group name for the
+// name()/get_network_group_name() accessors. A constructor cannot report failure,
+// so an RPC error is only logged and m_network_group_name stays empty.
+ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle) :
+    m_client(std::move(client)),
+    m_handle(handle)
+{
+    auto reply = m_client->ConfiguredNetworkGroup_name(m_handle);
+    if (!reply) {
+        LOGGER__ERROR("get_network_group_name failed with status {}", reply.status());
+        return;
+    }
+    m_network_group_name = reply.value();
+}
+
+// Releases the service-side network group handle. Destructors cannot propagate
+// errors, so a failed release is logged at critical level (it may leak a
+// service-side resource until the client process' keep-alive lapses).
+ConfiguredNetworkGroupClient::~ConfiguredNetworkGroupClient()
+{
+    auto reply = m_client->ConfiguredNetworkGroup_release(m_handle);
+    if (reply != HAILO_SUCCESS) {
+        LOGGER__CRITICAL("ConfiguredNetworkGroup_release failed with status: {}", reply);
+    }
+}
+
+// fork() support: a gRPC channel must not be carried across fork, so the client is
+// dropped before forking and re-created in both parent and child afterwards.
+hailo_status ConfiguredNetworkGroupClient::before_fork()
+{
+    m_client.reset();
+    return HAILO_SUCCESS;
+}
+
+// Re-creates the RPC client (fresh channel to the service).
+hailo_status ConfiguredNetworkGroupClient::create_client()
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    m_client = expected_client.release();
+    return HAILO_SUCCESS;
+}
+
+hailo_status ConfiguredNetworkGroupClient::after_fork_in_parent()
+{
+    return create_client();
+}
+
+// The child is a new process (new pid), so the service-side handle is duplicated
+// under the child's pid to keep its ref-count / ownership tracking correct.
+hailo_status ConfiguredNetworkGroupClient::after_fork_in_child()
+{
+    auto status = create_client();
+    CHECK_SUCCESS(status);
+    auto expected_dup_handle = m_client->ConfiguredNetworkGroup_dup_handle(OsUtils::get_curr_pid(), m_handle);
+    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
+    m_handle = expected_dup_handle.value();
+    return HAILO_SUCCESS;
+}
+
+// Manual activation is owned by the service/scheduler in multi-process mode, so
+// this always fails with HAILO_NOT_IMPLEMENTED (logged as a warning, not an error).
+Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupClient::activate(
+    const hailo_activate_network_group_params_t &/* network_group_params */)
+{
+    LOGGER__WARNING("ConfiguredNetworkGroup::activate function is not supported when using multi-process service or HailoRT Scheduler.");
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+/* Network group base functions */
+// Per-network latency measurement, queried from the service.
+Expected<LatencyMeasurementResult> ConfiguredNetworkGroupClient::get_latency_measurement(const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_get_latency_measurement(m_handle, network_name);
+}
+
+// Both name accessors return the name cached by the constructor (empty if the
+// constructor's name RPC failed).
+const std::string &ConfiguredNetworkGroupClient::get_network_group_name() const
+{
+    return m_network_group_name;
+}
+
+const std::string &ConfiguredNetworkGroupClient::name() const
+{
+    return m_network_group_name;
+}
+
+Expected<hailo_stream_interface_t> ConfiguredNetworkGroupClient::get_default_streams_interface()
+{
+    return m_client->ConfiguredNetworkGroup_get_default_stream_interface(m_handle);
+}
+
+// Raw stream objects live in the service process and cannot be handed to the client,
+// so the whole stream-access surface is unsupported here. Expected-returning methods
+// report HAILO_INVALID_OPERATION; the methods whose signatures cannot carry an error
+// (plain vector returns) log and return an empty collection instead.
+std::vector<std::reference_wrapper<InputStream>> ConfiguredNetworkGroupClient::get_input_streams_by_interface(hailo_stream_interface_t)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams_by_interface function is not supported when using multi-process service");
+    std::vector<std::reference_wrapper<InputStream>> empty_vec;
+    return empty_vec;
+}
+
+std::vector<std::reference_wrapper<OutputStream>> ConfiguredNetworkGroupClient::get_output_streams_by_interface(hailo_stream_interface_t)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_by_interface function is not supported when using multi-process service");
+    std::vector<std::reference_wrapper<OutputStream>> empty_vec;
+    return empty_vec;
+}
+
+ExpectedRef<InputStream> ConfiguredNetworkGroupClient::get_input_stream_by_name(const std::string&)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_input_stream_by_name function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+ExpectedRef<OutputStream> ConfiguredNetworkGroupClient::get_output_stream_by_name(const std::string&)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_output_stream_by_name function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+Expected<InputStreamRefVector> ConfiguredNetworkGroupClient::get_input_streams_by_network(const std::string&)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams_by_network function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+Expected<OutputStreamRefVector> ConfiguredNetworkGroupClient::get_output_streams_by_network(const std::string&)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_by_network function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+InputStreamRefVector ConfiguredNetworkGroupClient::get_input_streams()
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_input_streams function is not supported when using multi-process service");
+    InputStreamRefVector empty_vec;
+    return empty_vec;
+}
+
+OutputStreamRefVector ConfiguredNetworkGroupClient::get_output_streams()
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams function is not supported when using multi-process service");
+    OutputStreamRefVector empty_vec;
+    return empty_vec;
+}
+
+Expected<OutputStreamWithParamsVector> ConfiguredNetworkGroupClient::get_output_streams_from_vstream_names(const std::map<std::string, hailo_vstream_params_t>&)
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_output_streams_from_vstream_names function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+// Activation is service-owned (see activate()), so waiting for it is meaningless here.
+hailo_status ConfiguredNetworkGroupClient::wait_for_activation(const std::chrono::milliseconds&)
+{
+    LOGGER__WARNING("ConfiguredNetworkGroup::wait_for_activation function is not supported when using multi-process service or HailoRT Scheduler.");
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// VStream grouping / default-params construction — all pure delegations to the
+// service, keyed by this network group's handle.
+Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupClient::get_output_vstream_groups()
+{
+    return m_client->ConfiguredNetworkGroup_get_output_vstream_groups(m_handle);
+}
+
+Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroupClient::make_output_vstream_params_groups(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+{
+    return m_client->ConfiguredNetworkGroup_make_output_vstream_params_groups(m_handle,
+        quantized, format_type, timeout_ms, queue_size);
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupClient::make_input_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_make_input_vstream_params(m_handle,
+        quantized, format_type, timeout_ms, queue_size, network_name);
+}
+
+Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupClient::make_output_vstream_params(
+    bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_make_output_vstream_params(m_handle,
+        quantized, format_type, timeout_ms, queue_size, network_name);
+}
+
+// Stream/network/vstream info getters — pure delegations to the service.
+Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroupClient::get_all_stream_infos(const std::string &network_name) const
+{
+    return m_client->ConfiguredNetworkGroup_get_all_stream_infos(m_handle, network_name);
+}
+
+Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroupClient::get_network_infos() const
+{
+    return m_client->ConfiguredNetworkGroup_get_network_infos(m_handle);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_input_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_client->ConfiguredNetworkGroup_get_input_vstream_infos(m_handle, network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_output_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_client->ConfiguredNetworkGroup_get_output_vstream_infos(m_handle, network_name);
+}
+
+Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_all_vstream_infos(
+    const std::string &network_name) const
+{
+    return m_client->ConfiguredNetworkGroup_get_all_vstream_infos(m_handle, network_name);
+}
+
+// The bool return cannot carry an error, so an RPC failure is logged and reported
+// as "not scheduled". NOTE(review): callers cannot distinguish failure from false.
+bool ConfiguredNetworkGroupClient::is_scheduled() const
+{
+    auto reply = m_client->ConfiguredNetworkGroup_is_scheduled(m_handle);
+    if (reply.status() != HAILO_SUCCESS) {
+        LOGGER__ERROR("is_scheduled failed with status {}", reply.status());
+        return false;
+    }
+    return reply.value();
+}
+
+// Scheduler configuration setters — pure delegations to the service.
+hailo_status ConfiguredNetworkGroupClient::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_set_scheduler_timeout(m_handle, timeout, network_name);
+}
+
+hailo_status ConfiguredNetworkGroupClient::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_set_scheduler_threshold(m_handle, threshold, network_name);
+}
+
+hailo_status ConfiguredNetworkGroupClient::set_scheduler_priority(uint8_t priority, const std::string &network_name)
+{
+    return m_client->ConfiguredNetworkGroup_set_scheduler_priority(m_handle, priority, network_name);
+}
+
+// Activation-time accumulators are in-process objects and are not exposed over the
+// service; both getters log and return a null AccumulatorPtr.
+AccumulatorPtr ConfiguredNetworkGroupClient::get_activation_time_accumulator() const
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_activation_time_accumulator function is not supported when using multi-process service");
+    return AccumulatorPtr();
+}
+
+AccumulatorPtr ConfiguredNetworkGroupClient::get_deactivation_time_accumulator() const
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_deactivation_time_accumulator function is not supported when using multi-process service");
+    return AccumulatorPtr();
+}
+
+// Like is_scheduled(): the return type cannot carry an error, so RPC failure is
+// logged and mapped to a fallback value (false). NOTE(review): failure is
+// indistinguishable from a genuine single-context group.
+bool ConfiguredNetworkGroupClient::is_multi_context() const
+{
+    auto reply = m_client->ConfiguredNetworkGroup_is_multi_context(m_handle);
+    if (reply.status() != HAILO_SUCCESS) {
+        LOGGER__ERROR("is_multi_context failed with status {}", reply.status());
+        return false;
+    }
+    return reply.value();
+}
+
+// Returns the configure params fetched from the service; on RPC failure returns a
+// default-constructed ConfigureNetworkParams (logged). NOTE(review): callers cannot
+// tell a failed fetch apart from genuinely-default params.
+const ConfigureNetworkParams ConfiguredNetworkGroupClient::get_config_params() const
+{
+    auto reply = m_client->ConfiguredNetworkGroup_get_config_params(m_handle);
+    if (reply.status() != HAILO_SUCCESS) {
+        LOGGER__ERROR("get_config_params failed with status {}", reply.status());
+        return ConfigureNetworkParams();
+    }
+    return reply.value();
+}
+
+// Creates the input vstreams on the service side (one handle per vstream, owned by
+// this process' pid) and wraps each handle in a client-side InputVStream pipeline.
+// NOTE(review): if a later handle fails to wrap, handles already created on the
+// service are released only by their wrappers — earlier successfully-wrapped
+// vstreams clean up via their destructors; unwrapped handles rely on the service's
+// pid tracking. TODO confirm service-side cleanup on partial failure.
+Expected<std::vector<InputVStream>> ConfiguredNetworkGroupClient::create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params)
+{
+    auto reply = m_client->InputVStreams_create(m_handle, inputs_params, OsUtils::get_curr_pid());
+    CHECK_EXPECTED(reply);
+    auto input_vstreams_handles = reply.release();
+    std::vector<InputVStream> vstreams;
+    vstreams.reserve(input_vstreams_handles.size());
+
+    for (uint32_t handle : input_vstreams_handles) {
+        auto vstream_client = InputVStreamClient::create(handle);
+        CHECK_EXPECTED(vstream_client);
+        auto vstream = VStreamsBuilderUtils::create_input(vstream_client.release());
+        vstreams.push_back(std::move(vstream));
+    }
+    return vstreams;
+}
+
+// Output-side counterpart of create_input_vstreams (same handle-per-vstream scheme).
+Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupClient::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
+{
+    auto reply = m_client->OutputVStreams_create(m_handle, outputs_params, OsUtils::get_curr_pid());
+    CHECK_EXPECTED(reply);
+    auto output_vstreams_handles = reply.release();
+    std::vector<OutputVStream> vstreams;
+    vstreams.reserve(output_vstreams_handles.size());
+
+    for(uint32_t handle : output_vstreams_handles) {
+        auto vstream_client = OutputVStreamClient::create(handle);
+        CHECK_EXPECTED(vstream_client);
+        auto vstream = VStreamsBuilderUtils::create_output(vstream_client.release());
+        vstreams.push_back(std::move(vstream));
+    }
+    return vstreams;
+}
+
+} /* namespace hailort */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file rpc_client_utils.hpp
+ * @brief Utility functions for rpc client communication
+ **/
+
+#ifndef _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_
+#define _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/async_thread.hpp"
+#include "common/os_utils.hpp"
+
+#include "hailort_rpc_client.hpp"
+#include "rpc/rpc_definitions.hpp"
+
+#include <chrono>
+
+namespace hailort
+{
+
+// Process-wide helper for the RPC client: version handshake with the service, a
+// background keep-alive ping identifying this pid, and fork() support hooks.
+// Used as a singleton via get_instance(). NOTE(review): the constructor is public,
+// so additional instances can technically be created — presumably unintended.
+class HailoRtRpcClientUtils final
+{
+public:
+    static HailoRtRpcClientUtils& get_instance()
+    {
+        static HailoRtRpcClientUtils instance;
+        return instance;
+    }
+
+    HailoRtRpcClientUtils()
+        : m_mutex(std::make_shared<std::mutex>())
+        , m_forking(false)
+    {}
+
+    // Opens a fresh insecure channel to the service and wraps it in a client.
+    static Expected<std::unique_ptr<HailoRtRpcClient>> create_client()
+    {
+        auto channel = grpc::CreateChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+        CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
+        auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+        CHECK_NOT_NULL_AS_EXPECTED(client, HAILO_INTERNAL_FAILURE);
+        return client;
+    }
+
+    // One-time (idempotent, mutex-guarded) setup: verifies the service runs the same
+    // libhailort version as this client, records our pid, and starts the keep-alive
+    // thread. m_pid is only valid after this succeeds.
+    hailo_status init_client_service_communication()
+    {
+        std::unique_lock<std::mutex> lock(*m_mutex);
+        if (!m_initialized) {
+            // Create client
+            auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+            auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+            CHECK(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+            // Check service version
+            auto reply = client->get_service_version();
+            CHECK_EXPECTED_AS_STATUS(reply);
+            hailo_version_t client_version = {};
+            auto status = hailo_get_library_version(&client_version);
+            CHECK_SUCCESS(status);
+            auto service_version = reply.value();
+            auto are_equal = [](auto version1, auto version2) {
+                return version1.major == version2.major
+                    && version1.minor == version2.minor
+                    && version1.revision == version2.revision;
+            };
+            CHECK(are_equal(service_version, client_version), HAILO_INVALID_SERVICE_VERSION, "Invalid libhailort version on service: "
+                "client version {}.{}.{}, service version {}.{}.{}",
+                service_version.major, service_version.minor, service_version.revision,
+                client_version.major, client_version.minor, client_version.revision);
+
+            // Set pid
+            m_pid = OsUtils::get_curr_pid();
+
+            // Trigger client keep-alive
+            m_keep_alive_thread = make_unique_nothrow<AsyncThread<hailo_status>>([this] () {
+                return this->keep_alive();
+            });
+            CHECK(nullptr != m_keep_alive_thread, HAILO_OUT_OF_HOST_MEMORY);
+            m_initialized = true;
+        }
+        return HAILO_SUCCESS;
+    }
+
+    // Signals the keep-alive loop to exit and waits for it (get() joins), so no
+    // thread crosses the fork. May block up to half the keep-alive interval.
+    hailo_status before_fork()
+    {
+        m_forking = true;
+        return m_keep_alive_thread->get();
+    }
+
+    hailo_status after_fork_in_parent()
+    {
+        m_forking = false;
+        std::unique_lock<std::mutex> lock(*m_mutex);
+        if (m_initialized) {
+            // Trigger client keep-alive
+            m_keep_alive_thread = make_unique_nothrow<AsyncThread<hailo_status>>([this] () {
+                return this->keep_alive();
+            });
+        }
+        return HAILO_SUCCESS;
+    }
+
+    hailo_status after_fork_in_child()
+    {
+        m_forking = false;
+        // The child gets a fresh mutex: the state of a mutex inherited across fork
+        // is not reliable, hence the shared_ptr indirection around it.
+        m_mutex = std::make_shared<std::mutex>();
+        std::unique_lock<std::mutex> lock(*m_mutex);
+        if (m_initialized) {
+            m_pid = OsUtils::get_curr_pid();
+            // Trigger client keep-alive
+            m_keep_alive_thread = make_unique_nothrow<AsyncThread<hailo_status>>([this] () {
+                return this->keep_alive();
+            });
+        }
+        return HAILO_SUCCESS;
+    }
+
+private:
+    ~HailoRtRpcClientUtils()
+    {
+        // release() leaks the AsyncThread instead of joining it — presumably to avoid
+        // blocking/deadlocking during static destruction at process exit, since the
+        // keep-alive loop only wakes every HAILO_KEEPALIVE_INTERVAL/2. TODO confirm.
+        m_keep_alive_thread.release();
+    }
+
+    // Pings the service with this process' pid until before_fork() raises m_forking.
+    // A failed ping makes the thread exit with that status (no retry/restart here).
+    hailo_status keep_alive()
+    {
+        auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+        auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+        CHECK(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+        while (!m_forking) {
+            auto status = client->client_keep_alive(m_pid);
+            CHECK_SUCCESS(status);
+            std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2);
+        }
+        return HAILO_SUCCESS;
+    }
+
+    // shared_ptr so after_fork_in_child() can swap in a fresh mutex (see above).
+    std::shared_ptr<std::mutex> m_mutex;
+    AsyncThreadPtr<hailo_status> m_keep_alive_thread;
+    bool m_initialized = false;   // guarded by *m_mutex
+    std::atomic<bool> m_forking;  // read by keep_alive() without the lock
+    uint32_t m_pid;               // valid only after init_client_service_communication()
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_HAILORT_RPC_CLIENT_UTILS_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file shared_resource_manager.hpp
- * @brief holds and manages shared resource objects mapped by a key.
- *
- **/
-
-#ifndef HAILO_SHARED_RESOURCE_MANAGER_HPP_
-#define HAILO_SHARED_RESOURCE_MANAGER_HPP_
-
-#include "hailo/expected.hpp"
-#include "common/utils.hpp"
-
-#include <vector>
-#include <unordered_map>
-#include <memory>
-#include <mutex>
-#include <typeinfo>
-
-namespace hailort
-{
-
-#define HAILO_MAX_SHARED_RESOURCES (32)
-#define HAILO_UNIQUE_RESOURCE_KEY (0)
-
-template<class Key, class T>
-struct ResourceRef {
- ResourceRef(Key user_key, std::shared_ptr<T> resource)
- : user_key(user_key), count(0), resource(std::move(resource))
- {}
-
- Key user_key;
- uint32_t count;
- std::shared_ptr<T> resource;
-};
-
-template<class Key, class T>
-class SharedResourceManager
-{
-public:
- static SharedResourceManager& get_instance()
- {
- static SharedResourceManager instance;
- return instance;
- }
-
- Expected<std::shared_ptr<T>> resource_lookup(uint32_t handle)
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- auto resource = m_resources.at(handle)->resource;
- return resource;
- }
-
- template<class CreateFunc>
- Expected<uint32_t> register_resource(Key user_key, CreateFunc create)
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- uint32_t available_index = static_cast<uint32_t>(m_resources.size());
- uint32_t match_index = static_cast<uint32_t>(m_resources.size());
- for (uint32_t i = 0; i < m_resources.size(); ++i) {
- if (m_resources.at(i) == nullptr) {
- available_index = i;
- } else {
- if (m_resources.at(i)->user_key == user_key) {
- // Resource already registered
- match_index = i;
- break;
- }
- }
- }
- bool should_create = match_index == m_resources.size() || user_key == unique_key();
- CHECK_AS_EXPECTED(available_index < m_resources.size() || !should_create, HAILO_NOT_AVAILABLE,
- "Tried to create more than {} shared resources of type {}", max_resources(), typeid(T).name());
- if (should_create) {
- // Create a new resource and register
- auto expected_resource = create();
- CHECK_EXPECTED(expected_resource);
- m_resources.at(available_index) = std::make_shared<ResourceRef<Key, T>>(user_key, expected_resource.release());
- m_resources.at(available_index)->count++;
- return available_index;
- }
- m_resources.at(match_index)->count++;
- return match_index;
- }
-
- void release_resource(uint32_t handle)
- {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_resources.at(handle)->count--;
- if (!m_resources.at(handle)->count) {
- m_resources.at(handle) = nullptr;
- }
- }
-
-private:
- SharedResourceManager()
- : m_resources(max_resources(), nullptr)
- {}
-
- static uint32_t max_resources()
- {
- // This method can be "overriden" with template specialization
- // to set another MAX for specific managers.
- return HAILO_MAX_SHARED_RESOURCES;
- }
-
- static Key unique_key()
- {
- // This method can be "overriden" with template specialization
- // to set another UNIQUE for specific managers.
- return HAILO_UNIQUE_RESOURCE_KEY;
- }
-
- std::mutex m_mutex;
- std::vector<std::shared_ptr<ResourceRef<Key, T>>> m_resources;
-};
-
-}
-
-#endif /* HAILO_SHARED_RESOURCE_MANAGER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file stream.cpp
- * @brief Implementation of stream abstraction
- **/
-
-#include "hailo/stream.hpp"
-#include "hailo/hailort.h"
-#include "hailo/hailort_common.hpp"
-#include "hailo/transform.hpp"
-#include "common/utils.hpp"
-
-#include <sstream>
-
-namespace hailort
-{
-
-hailo_status InputStream::flush()
-{
- return HAILO_SUCCESS;
-}
-
-hailo_status InputStream::write(const MemoryView &buffer)
-{
- CHECK((buffer.size() % get_info().hw_frame_size) == 0, HAILO_INVALID_ARGUMENT,
- "write size {} must be a multiple of hw size {}", buffer.size(), get_info().hw_frame_size);
-
- CHECK(((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
- "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
-
- return sync_write_all_raw_buffer_no_transform_impl(const_cast<uint8_t*>(buffer.data()), 0, buffer.size());
-}
-
-std::string InputStream::to_string() const
-{
- std::stringstream string_stream;
- string_stream << "InputStream(index=" << static_cast<uint32_t>(get_info().index)
- << ", name=" << get_info().name << ")";
- return string_stream.str();
-}
-
-OutputStream::OutputStream(OutputStream &&other) : m_stream_info(std::move(other.get_info())),
- m_dataflow_manager_id(std::move(other.m_dataflow_manager_id)),
- m_invalid_frames_count(static_cast<uint32_t>(other.m_invalid_frames_count))
-{}
-
-hailo_status OutputStream::read_nms(void *buffer, size_t offset, size_t size)
-{
- uint32_t num_of_classes = get_info().nms_info.number_of_classes;
- uint32_t max_bboxes_per_class = get_info().nms_info.max_bboxes_per_class;
- uint32_t chunks_per_frame = get_info().nms_info.chunks_per_frame;
- size_t bbox_size = get_info().nms_info.bbox_size;
- size_t transfer_size = bbox_size;
-
- CHECK(size == get_info().hw_frame_size, HAILO_INSUFFICIENT_BUFFER,
- "On nms stream buffer size should be {} (given size {})", get_info().hw_frame_size, size);
-
- for (uint32_t chunk_index = 0; chunk_index < chunks_per_frame; chunk_index++) {
- for (uint32_t class_index = 0; class_index < num_of_classes; class_index++) {
- nms_bbox_counter_t class_bboxes_count = 0;
- nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
- offset += sizeof(*class_bboxes_count_ptr);
-
- // Read bboxes until reaching delimiter
- for (;;) {
- MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
- auto expected_bytes_read = sync_read_raw_buffer(buffer_view);
- if ((HAILO_STREAM_ABORTED_BY_USER == expected_bytes_read.status()) ||
- ((HAILO_STREAM_NOT_ACTIVATED == expected_bytes_read.status()))) {
- return expected_bytes_read.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_bytes_read, "Failed reading nms bbox");
- transfer_size = expected_bytes_read.release();
- CHECK(transfer_size == bbox_size, HAILO_INTERNAL_FAILURE,
- "Data read from the device was size {}, should be bbox size {}", transfer_size, bbox_size);
-
- if (HailoRTCommon::NMS_DUMMY_DELIMITER == *(uint64_t*)((uint8_t*)buffer + offset)) {
- continue;
- }
-
- if (HailoRTCommon::NMS_DELIMITER == *(uint64_t*)((uint8_t*)buffer + offset)) {
- break;
- }
-
- class_bboxes_count++;
- CHECK(class_bboxes_count <= max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
- "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count, max_bboxes_per_class);
- offset += bbox_size;
- }
-
- *class_bboxes_count_ptr = class_bboxes_count;
- }
- }
- return HAILO_SUCCESS;
-}
-
-hailo_status OutputStream::read(MemoryView buffer)
-{
- CHECK((buffer.size() % get_info().hw_frame_size) == 0, HAILO_INVALID_ARGUMENT,
- "When read size {} must be a multiple of hw size {}", buffer.size(), get_info().hw_frame_size);
-
- if (get_info().format.order == HAILO_FORMAT_ORDER_HAILO_NMS){
- return read_nms(buffer.data(), 0, buffer.size());
- } else {
- return this->read_all(buffer);
- }
-}
-
-std::string OutputStream::to_string() const
-{
- std::stringstream string_stream;
- string_stream << "OutputStream(index=" << static_cast<uint32_t>(get_info().index)
- << ", name=" << get_info().name << ")";
- return string_stream.str();
-}
-
-uint32_t OutputStream::get_invalid_frames_count() const
-{
- return m_invalid_frames_count.load();
-}
-
-void OutputStream::increase_invalid_frames_count(uint32_t value)
-{
- m_invalid_frames_count = m_invalid_frames_count + value;
-}
-
-
-} /* namespace hailort */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/stream.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/stream_internal.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file stream.cpp
+ * @brief Implementation of stream abstraction
+ **/
+
+#include "hailo/stream.hpp"
+#include "hailo/hailort.h"
+#include "hailo/hailort_common.hpp"
+#include "hailo/transform.hpp"
+#include "common/utils.hpp"
+
+#include <sstream>
+
+namespace hailort
+{
+
+hailo_status InputStream::flush()
+{
+ return HAILO_SUCCESS;
+}
+
+hailo_status InputStream::write(const MemoryView &buffer)
+{
+ CHECK((buffer.size() % get_info().hw_frame_size) == 0, HAILO_INVALID_ARGUMENT,
+ "write size {} must be a multiple of hw size {}", buffer.size(), get_info().hw_frame_size);
+
+ CHECK(((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
+ "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
+
+ return sync_write_all_raw_buffer_no_transform_impl(const_cast<uint8_t*>(buffer.data()), 0, buffer.size());
+}
+
+hailo_status InputStream::wait_for_ready(size_t /* transfer_size */, std::chrono::milliseconds /* timeout */)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+hailo_status InputStream::write_async(std::shared_ptr<DmaMappedBuffer> /* buffer */, const TransferDoneCallback &/* user_callback */, void */* opaque */)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+std::string InputStream::to_string() const
+{
+ std::stringstream string_stream;
+ string_stream << "InputStream(index=" << static_cast<uint32_t>(get_info().index)
+ << ", name=" << get_info().name << ")";
+ return string_stream.str();
+}
+
+EventPtr &InputStream::get_network_group_activated_event()
+{
+ LOGGER__WARNING("VDevice InputStream::get_network_group_activated_event() is deprecated.");
+ return get_core_op_activated_event();
+}
+
+hailo_status OutputStream::read_nms(void *buffer, size_t offset, size_t size)
+{
+ uint32_t num_of_classes = get_info().nms_info.number_of_classes;
+ uint32_t max_bboxes_per_class = get_info().nms_info.max_bboxes_per_class;
+ uint32_t chunks_per_frame = get_info().nms_info.chunks_per_frame;
+ size_t bbox_size = get_info().nms_info.bbox_size;
+ size_t transfer_size = bbox_size;
+
+ CHECK(size == get_info().hw_frame_size, HAILO_INSUFFICIENT_BUFFER,
+ "On nms stream buffer size should be {} (given size {})", get_info().hw_frame_size, size);
+
+ for (uint32_t chunk_index = 0; chunk_index < chunks_per_frame; chunk_index++) {
+ for (uint32_t class_index = 0; class_index < num_of_classes; class_index++) {
+ nms_bbox_counter_t class_bboxes_count = 0;
+ nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
+ offset += sizeof(*class_bboxes_count_ptr);
+
+ // Read bboxes until reaching delimiter
+ for (;;) {
+ MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, transfer_size);
+ auto expected_bytes_read = sync_read_raw_buffer(buffer_view);
+ if ((HAILO_STREAM_ABORTED_BY_USER == expected_bytes_read.status()) ||
+ ((HAILO_STREAM_NOT_ACTIVATED == expected_bytes_read.status()))) {
+ return expected_bytes_read.status();
+ }
+ CHECK_EXPECTED_AS_STATUS(expected_bytes_read, "Failed reading nms bbox");
+ transfer_size = expected_bytes_read.release();
+ CHECK(transfer_size == bbox_size, HAILO_INTERNAL_FAILURE,
+ "Data read from the device was size {}, should be bbox size {}", transfer_size, bbox_size);
+
+ if (HailoRTCommon::NMS_DUMMY_DELIMITER == *(uint64_t*)((uint8_t*)buffer + offset)) {
+ continue;
+ }
+
+ if (HailoRTCommon::NMS_DELIMITER == *(uint64_t*)((uint8_t*)buffer + offset)) {
+ break;
+ }
+
+ class_bboxes_count++;
+ CHECK(class_bboxes_count <= max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
+ "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count, max_bboxes_per_class);
+ offset += bbox_size;
+ }
+
+ *class_bboxes_count_ptr = class_bboxes_count;
+ }
+ }
+ return HAILO_SUCCESS;
+}
+
+hailo_status OutputStream::read(MemoryView buffer)
+{
+ CHECK((buffer.size() % get_info().hw_frame_size) == 0, HAILO_INVALID_ARGUMENT,
+ "Read size {} must be a multiple of hw size {}", buffer.size(), get_info().hw_frame_size);
+
+ if (get_info().format.order == HAILO_FORMAT_ORDER_HAILO_NMS){
+ return read_nms(buffer.data(), 0, buffer.size());
+ } else {
+ return this->read_all(buffer);
+ }
+}
+
+hailo_status OutputStream::wait_for_ready(size_t /* transfer_size */, std::chrono::milliseconds /* timeout */)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+hailo_status OutputStream::read_async(std::shared_ptr<DmaMappedBuffer> /* buffer */, const TransferDoneCallback &/* user_callback */, void */* opaque */)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+
+std::string OutputStream::to_string() const
+{
+ std::stringstream string_stream;
+ string_stream << "OutputStream(index=" << static_cast<uint32_t>(get_info().index)
+ << ", name=" << get_info().name << ")";
+ return string_stream.str();
+}
+
+uint32_t OutputStream::get_invalid_frames_count() const
+{
+ return m_invalid_frames_count.load();
+}
+
+void OutputStream::increase_invalid_frames_count(uint32_t value)
+{
+ m_invalid_frames_count = m_invalid_frames_count + value;
+}
+
+EventPtr &OutputStream::get_network_group_activated_event()
+{
+ LOGGER__WARNING("VDevice OutputStream::get_network_group_activated_event() is deprecated.");
+ return get_core_op_activated_event();
+}
+
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file stream_internal.cpp
+ * @brief Implementation of InputStreamBase and OutputStreamBase
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/transform.hpp"
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+
+#include "stream_common/stream_internal.hpp"
+
+
+namespace hailort
+{
+
+InputStreamBase::InputStreamBase(const hailo_stream_info_t &stream_info,
+ const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event) :
+ m_nn_stream_config(nn_stream_config), m_core_op_activated_event(core_op_activated_event)
+{
+ m_stream_info = stream_info;
+}
+
+EventPtr &InputStreamBase::get_core_op_activated_event()
+{
+ return m_core_op_activated_event;
+}
+
+bool InputStreamBase::is_scheduled()
+{
+ return false;
+}
+
+OutputStreamBase::OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
+ const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event) :
+ m_nn_stream_config(nn_stream_config), m_layer_info(layer_info), m_core_op_activated_event(core_op_activated_event)
+{
+ m_stream_info = stream_info;
+}
+
+EventPtr &OutputStreamBase::get_core_op_activated_event()
+{
+ return m_core_op_activated_event;
+}
+
+bool OutputStreamBase::is_scheduled()
+{
+ return false;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file stream_internal.hpp
+ * @brief Class declaration for InputStreamBase/OutputStreamBase that implement the basic InputStream/OutputStream
+ * "interface" (not technically an interface, but good enough). All internal input/output streams
+ * should inherit from the InputStreamBase/OutputStreamBase classes.
+ * Hence, the hierarchy is as follows:
+ *
+ * InputStream (External "interface")
+ * |-- InputStreamBase (Base class)
+ * |-- VdmaInputStream
+ * |-- EthernetInputStream
+ * |-- MipiInputStream
+ *
+ *
+ * OutputStream (External "interface")
+ * |-- OutputStreamBase (Base class)
+ * |-- VdmaOutputStream
+ * |-- EthernetOutputStream
+ *
+ **/
+
+#ifndef _STREAM_INTERNAL_HPP_
+#define _STREAM_INTERNAL_HPP_
+
+#include "hailo/stream.hpp"
+#include "hailo/event.hpp"
+#include "hailo/hailort_common.hpp"
+
+#include "hef/hef_internal.hpp"
+#include "device_common/control_protocol.hpp"
+#include "hef/layer_info.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+
+
+namespace hailort
+{
+
+typedef struct hailo_mux_info_t{
+ hailo_stream_info_t info;
+ uint32_t row_size;
+ uint32_t row_counter;
+ uint32_t rows_gcd;
+ uint32_t offset;
+ uint32_t current_offset; // Current demuxing offset
+ uint32_t successors_count;
+ struct hailo_mux_info_t *successors[HailoRTCommon::MAX_MUX_PREDECESSORS];
+ void* buffer;
+} hailo_mux_info_t;
+
+class InputStreamWrapper;
+class OutputStreamWrapper;
+
+class InputStreamBase : public InputStream
+{
+public:
+ virtual ~InputStreamBase() = default;
+
+ virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
+ {
+ return m_nn_stream_config;
+ };
+
+ virtual hailo_status send_pending_buffer(size_t device_index = 0)
+ {
+ (void)device_index;
+ return HAILO_INVALID_OPERATION;
+ }
+
+ virtual Expected<size_t> get_buffer_frames_size() const
+ {
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ virtual Expected<size_t> get_pending_frames_count() const
+ {
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &/*callback*/)
+ {
+ return HAILO_INVALID_OPERATION;
+ }
+
+ CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+
+protected:
+ explicit InputStreamBase(const LayerInfo &layer_info, hailo_stream_interface_t stream_interface,
+ EventPtr &&core_op_activated_event, hailo_status &status) :
+ m_core_op_activated_event(std::move(core_op_activated_event))
+ {
+ m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+
+ const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer_info);
+ auto nn_stream_config = HefConfigurator::parse_nn_stream_config(layer_info,
+ hw_padding_supported && (HAILO_STREAM_INTERFACE_MIPI != stream_interface)); // On MIPI networks, we don't want to use hw padding nn stream config.
+ if(!nn_stream_config) {
+ LOGGER__ERROR("Failed parse nn stream config");
+ status = nn_stream_config.status();
+ return;
+ }
+ m_nn_stream_config = nn_stream_config.release();
+ status = HAILO_SUCCESS;
+ }
+
+ InputStreamBase(const hailo_stream_info_t &stream_info,
+ const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event);
+
+ virtual EventPtr &get_core_op_activated_event() override;
+ virtual bool is_scheduled() override;
+
+private:
+ friend class InputStreamWrapper;
+
+ EventPtr m_core_op_activated_event;
+};
+
+
+class OutputStreamBase : public OutputStream
+{
+public:
+ virtual ~OutputStreamBase() = default;
+
+ virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
+ {
+ return m_nn_stream_config;
+ };
+
+ virtual const LayerInfo& get_layer_info() override
+ {
+ return m_layer_info;
+ };
+
+ virtual Expected<size_t> get_buffer_frames_size() const
+ {
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ virtual Expected<size_t> get_pending_frames_count() const
+ {
+ return make_unexpected(HAILO_INVALID_OPERATION);
+ }
+
+ virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &/*callback*/)
+ {
+ return HAILO_INVALID_OPERATION;
+ }
+
+ CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
+
+protected:
+ explicit OutputStreamBase(const LayerInfo &layer_info,
+ EventPtr &&core_op_activated_event, hailo_status &status) :
+ m_layer_info(layer_info), m_core_op_activated_event(std::move(core_op_activated_event))
+ {
+ m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(m_layer_info);
+
+ const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(m_layer_info);
+ auto nn_stream_config = HefConfigurator::parse_nn_stream_config(m_layer_info, hw_padding_supported);
+ if(!nn_stream_config) {
+ LOGGER__ERROR("Failed parse nn stream config");
+ status = nn_stream_config.status();
+ return;
+ }
+ m_nn_stream_config = nn_stream_config.release();
+ status = HAILO_SUCCESS;
+ }
+
+ OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
+ const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event);
+
+ virtual EventPtr &get_core_op_activated_event() override;
+ virtual bool is_scheduled() override;
+
+ LayerInfo m_layer_info;
+
+private:
+ friend class OutputStreamWrapper;
+
+ EventPtr m_core_op_activated_event;
+};
+
+} /* namespace hailort */
+
+#endif /* _STREAM_INTERNAL_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file stream_internal.cpp
- * @brief Implementation of InputStreamBase and OutputStreamBase
- **/
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "common/logger_macros.hpp"
-#include "hailo/transform.hpp"
-#include "common/utils.hpp"
-
-namespace hailort
-{
-
-InputStreamBase::InputStreamBase(const hailo_stream_info_t &stream_info,
- const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &network_group_activated_event) :
- m_nn_stream_config(nn_stream_config), m_network_group_activated_event(network_group_activated_event)
-{
- m_stream_info = stream_info;
-}
-
-EventPtr &InputStreamBase::get_network_group_activated_event()
-{
- return m_network_group_activated_event;
-}
-
-bool InputStreamBase::is_scheduled()
-{
- return false;
-}
-
-OutputStreamBase::OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
- const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &network_group_activated_event) :
- m_nn_stream_config(nn_stream_config), m_layer_info(layer_info), m_network_group_activated_event(network_group_activated_event)
-{
- m_stream_info = stream_info;
-}
-
-EventPtr &OutputStreamBase::get_network_group_activated_event()
-{
- return m_network_group_activated_event;
-}
-
-bool OutputStreamBase::is_scheduled()
-{
- return false;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file stream_internal.hpp
- * @brief Class declaration for InputStreamBase/OutputStreamBase that implement the basic InputStream/OutputStream
- * "interface" (not technically an interface, but good enough). All internal input/output streams
- * should inherit from the InputStreamBase/OutputStreamBase classes.
- * Hence, the hierarchy is as follows:
- *
- * InputStream (External "interface")
- * |-- InputStreamBase (Base class)
- * |-- VdmaInputStream (Base class for vdma streams)
- * | |-- PcieInputStream
- * | |-- CoreInputStream
- * |-- EthernetInputStream
- * |-- MipiInputStream
- *
- *
- * OutputStream (External "interface")
- * |-- OutputStreamBase (Base class)
- * |-- VdmaOutputStream (Base class for vdma streams)
- * | |-- PcieOutputStream
- * | |-- CoreOutputStream
- * |-- EthernetOutputStream
- *
- **/
-
-#ifndef _STREAM_INTERNAL_HPP_
-#define _STREAM_INTERNAL_HPP_
-
-#include "hailo/stream.hpp"
-#include "hailo/event.hpp"
-#include "hailo/hailort_common.hpp"
-#include "hef_internal.hpp"
-#include "control_protocol.hpp"
-#include "layer_info.hpp"
-#include "vdma_channel.hpp"
-
-namespace hailort
-{
-
-typedef struct hailo_mux_info_t{
- hailo_stream_info_t info;
- uint32_t row_size;
- uint32_t row_counter;
- uint32_t rows_gcd;
- uint32_t offset;
- uint32_t current_offset; // Current demuxing offset
- uint32_t successors_count;
- struct hailo_mux_info_t *successors[HailoRTCommon::MAX_MUX_PREDECESSORS];
- void* buffer;
-} hailo_mux_info_t;
-
-class InputStreamWrapper;
-class OutputStreamWrapper;
-
-class InputStreamBase : public InputStream
-{
-public:
- virtual ~InputStreamBase() = default;
-
- InputStreamBase(const InputStreamBase&) = delete;
- InputStreamBase& operator=(const InputStreamBase&) = delete;
- InputStreamBase(InputStreamBase&&) = default;
-
- virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
- {
- return m_nn_stream_config;
- };
-
- virtual hailo_status send_pending_buffer(size_t device_index = 0)
- {
- (void)device_index;
- return HAILO_INVALID_OPERATION;
- }
-
- virtual Expected<size_t> get_buffer_frames_size() const
- {
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- virtual Expected<size_t> get_pending_frames_count() const
- {
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
-
-protected:
- explicit InputStreamBase(const LayerInfo &layer_info, hailo_stream_interface_t stream_interface,
- EventPtr &&network_group_activated_event, hailo_status &status) :
- m_network_group_activated_event(std::move(network_group_activated_event))
- {
- m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
-
- const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer_info);
- auto nn_stream_config = HefConfigurator::parse_nn_stream_config(layer_info,
- hw_padding_supported && (HAILO_STREAM_INTERFACE_MIPI != stream_interface)); // On MIPI networks, we don't want to use hw padding nn stream config.
- if(!nn_stream_config) {
- LOGGER__ERROR("Failed parse nn stream config");
- status = nn_stream_config.status();
- return;
- }
- m_nn_stream_config = nn_stream_config.release();
- status = HAILO_SUCCESS;
- }
-
- InputStreamBase(const hailo_stream_info_t &stream_info,
- const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &network_group_activated_event);
-
- virtual EventPtr &get_network_group_activated_event() override;
- virtual bool is_scheduled() override;
-
-private:
- friend class InputStreamWrapper;
-
- EventPtr m_network_group_activated_event;
-};
-
-
-class OutputStreamBase : public OutputStream
-{
-public:
- virtual ~OutputStreamBase() = default;
-
- OutputStreamBase(const OutputStreamBase&) = delete;
- OutputStreamBase& operator=(const OutputStreamBase&) = delete;
- OutputStreamBase(OutputStreamBase&&) = default;
-
- virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
- {
- return m_nn_stream_config;
- };
-
- virtual const LayerInfo& get_layer_info() override
- {
- return m_layer_info;
- };
-
- virtual Expected<size_t> get_buffer_frames_size() const
- {
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- virtual Expected<size_t> get_pending_frames_count() const
- {
- return make_unexpected(HAILO_INVALID_OPERATION);
- }
-
- virtual hailo_status register_for_d2h_interrupts(const std::function<void(uint32_t)> &/*callback*/)
- {
- return HAILO_INVALID_OPERATION;
- }
-
- CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
-
-protected:
- explicit OutputStreamBase(const LayerInfo &layer_info,
- EventPtr &&network_group_activated_event, hailo_status &status) :
- m_layer_info(layer_info), m_network_group_activated_event(std::move(network_group_activated_event))
- {
- m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(m_layer_info);
-
- const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(m_layer_info);
- auto nn_stream_config = HefConfigurator::parse_nn_stream_config(m_layer_info, hw_padding_supported);
- if(!nn_stream_config) {
- LOGGER__ERROR("Failed parse nn stream config");
- status = nn_stream_config.status();
- return;
- }
- m_nn_stream_config = nn_stream_config.release();
- status = HAILO_SUCCESS;
- }
-
- OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
- const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &network_group_activated_event);
-
- virtual EventPtr &get_network_group_activated_event() override;
- virtual bool is_scheduled() override;
-
- LayerInfo m_layer_info;
-
-private:
- friend class OutputStreamWrapper;
-
- EventPtr m_network_group_activated_event;
-};
-
-} /* namespace hailort */
-
-#endif /* _STREAM_INTERNAL_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file thread_safe_map.hpp
- * @brief Thread safe map
- **/
-
-#ifndef HAILO_THREAD_SAFE_MAP_HPP_
-#define HAILO_THREAD_SAFE_MAP_HPP_
-
-#include <map>
-#include <mutex>
-
-namespace hailort
-{
-
-template<class K, class V>
-class SafeMap {
-public:
- SafeMap() : m_map(), m_mutex() {}
- virtual ~SafeMap() = default;
- SafeMap(SafeMap &&map) : m_map(std::move(map.m_map)), m_mutex() {};
-
- V& operator[](const K& k) {
- std::lock_guard<std::mutex> lock(m_mutex);
- return m_map[k];
- }
-
- V& operator[](K&& k) {
- std::lock_guard<std::mutex> lock(m_mutex);
- return m_map[k];
- }
-
- V& at(K& k) {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.at(k);
- }
-
- V& at(const K& k) {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.at(k);
- }
-
- std::size_t size() {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.size();
- }
-
- typename std::map<K, V>::iterator find(K& k) {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.find(k);
- }
-
- typename std::map<K, V>::iterator find(const K& k) {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.find(k);
- }
-
- bool contains(const K &k) {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.find(k) != m_map.end();
- }
-
- void clear() {
- std::unique_lock<std::mutex> lock(m_mutex);
- m_map.clear();
- }
-
- typename std::map<K, V>::iterator begin() {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.begin();
- }
-
- typename std::map<K, V>::iterator end() {
- std::unique_lock<std::mutex> lock(m_mutex);
- return m_map.end();
- }
-
-protected:
- std::map<K, V> m_map;
- mutable std::mutex m_mutex;
-};
-
-} /* namespace hailort */
-
-#endif // HAILO_THREAD_SAFE_MAP_HPP_
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file thread_safe_queue.hpp
- * @brief Thread safe queue taken from https://stackoverflow.com/a/16075550
- **/
-
-#ifndef HAILO_THREAD_SAFE_QUEUE_HPP_
-#define HAILO_THREAD_SAFE_QUEUE_HPP_
-
-#include "hailo/expected.hpp"
-#include "common/utils.hpp"
-#include "hailo/event.hpp"
-#include "common/logger_macros.hpp"
-#include "event_internal.hpp"
-
-// Define __unix__ for inclusion of readerwriterqueue.h because readerwriterqueue is implemented over POSIX standards
-// but checks __unix__ - otherwise QNX returns unsupported platform (need HAILO_UNDEF_UNIX_FLAG in order to undefine
-// __unix__ only in case of defining it here)
-#if defined(__QNX__) && !defined(__unix__)
-#define __unix__
-#define HAILO_UNDEF_UNIX_FLAG
-#endif
-
-#include "readerwriterqueue.h"
-
-#if defined(HAILO_UNDEF_UNIX_FLAG)
-#undef __unix__
-#undef HAILO_UNDEF_UNIX_FLAG
-#endif
-
-#include <queue>
-#include <mutex>
-#include <memory>
-#include <condition_variable>
-#include <chrono>
-
-namespace hailort
-{
-
-#define DEFAULT_TIMEOUT_MS (1000)
-
-// A threadsafe-queue. - https://stackoverflow.com/a/16075550
-template <class T>
-class SafeQueue {
-public:
- SafeQueue() : m_queue(), m_mutex(), m_queue_not_empty(), m_timeout(DEFAULT_TIMEOUT_MS) {}
- virtual ~SafeQueue() = default;
-
- // Add an element to the queue.
- virtual void push(T t) {
- std::lock_guard<std::mutex> lock(m_mutex);
- m_queue.push(t);
- m_queue_not_empty.notify_one();
- }
-
- // Get the "front"-element.
- // If the queue is empty, wait till a element is available.
- virtual T pop() {
- std::unique_lock<std::mutex> lock(m_mutex);
- while (m_queue.empty()) {
- // release lock as long as the wait and require it afterwards.
- m_queue_not_empty.wait_for(lock, m_timeout);
- }
- T val = m_queue.front();
- m_queue.pop();
- return val;
- }
-
-protected:
- std::queue<T> m_queue;
- mutable std::mutex m_mutex;
- std::condition_variable m_queue_not_empty;
- const std::chrono::milliseconds m_timeout;
-};
-
- template <class T>
- class SafeQueueMaxSize : public SafeQueue<T> {
- public:
- SafeQueueMaxSize(uint32_t max_size) :
- SafeQueue<T>::SafeQueue(),
- m_max_size(max_size),
- m_queue_not_full()
- {}
- virtual ~SafeQueueMaxSize() = default;
-
- virtual void push(T t) override {
- std::unique_lock<std::mutex> lock(this->m_mutex);
- m_queue_not_full.wait(lock, [&]{return this->m_queue.size() < m_max_size;});
-
- this->m_queue.push(t);
- this->m_queue_not_empty.notify_one();
- }
-
- virtual T pop() override {
- std::unique_lock<std::mutex> lock(this->m_mutex);
- this->m_queue_not_empty.wait(lock, [&]{return !this->m_queue.empty();});
-
- T val = this->m_queue.front();
- this->m_queue.pop();
-
- if (this->m_queue.size() < m_max_size) {
- m_queue_not_full.notify_one();
- }
- return val;
- }
-protected:
- const uint32_t m_max_size;
- std::condition_variable m_queue_not_full;
-};
-
-// Single-Producer Single-Consumer Queue
-// The queue's size is limited
-template<typename T, size_t MAX_BLOCK_SIZE = 512>
-class SpscQueue
-{
-private:
- typedef moodycamel::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue;
-
-public:
- static constexpr auto INIFINITE_TIMEOUT() { return std::chrono::milliseconds(HAILO_INFINITE); }
-
- SpscQueue(size_t max_size, SemaphorePtr items_enqueued_sema, SemaphorePtr items_dequeued_sema,
- EventPtr shutdown_event, std::chrono::milliseconds default_timeout) :
- m_inner(max_size),
- m_items_enqueued_sema_or_shutdown(items_enqueued_sema, shutdown_event),
- m_items_enqueued_sema(items_enqueued_sema),
- m_items_dequeued_sema_or_shutdown(items_dequeued_sema, shutdown_event),
- m_items_dequeued_sema(items_dequeued_sema),
- m_default_timeout(default_timeout),
- m_size(max_size),
- m_enqueues_count(0),
- m_callback_mutex()
- {}
-
- virtual ~SpscQueue() = default;
- SpscQueue(SpscQueue &&other) :
- m_inner(std::move(other.m_inner)),
- m_items_enqueued_sema_or_shutdown(std::move(other.m_items_enqueued_sema_or_shutdown)),
- m_items_enqueued_sema(std::move(other.m_items_enqueued_sema)),
- m_items_dequeued_sema_or_shutdown(std::move(other.m_items_dequeued_sema_or_shutdown)),
- m_items_dequeued_sema(std::move(other.m_items_dequeued_sema)),
- m_default_timeout(std::move(other.m_default_timeout)),
- m_size(std::move(other.m_size)),
- m_enqueues_count(std::move(other.m_enqueues_count.load())),
- m_cant_enqueue_callback(std::move(other.m_cant_enqueue_callback)),
- m_can_enqueue_callback(std::move(other.m_can_enqueue_callback)),
- m_callback_mutex()
- {}
-
- static Expected<SpscQueue> create(size_t max_size, const EventPtr& shutdown_event,
- std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
- {
- if (0 == max_size) {
- LOGGER__ERROR("Invalid queue max_size (must be greater than zero)");
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
-
- // * items_enqueued_sema:
- // +1 for each enqueued item
- // -1 for each dequeued item
- // Blocks when there are no items in the queue (hence when the queue is built it starts at zero)
- // * items_dequeued_sema:
- // +1 for each dequeued item
- // -1 for each enqueued item
- // Blocks when the queue is full (which happens when it's value reaches zero, hence it starts at queue size)
- const auto items_enqueued_sema = Semaphore::create_shared(0);
- CHECK_AS_EXPECTED(nullptr != items_enqueued_sema, HAILO_OUT_OF_HOST_MEMORY, "Failed creating items_enqueued_sema semaphore");
-
- const auto items_dequeued_sema = Semaphore::create_shared(static_cast<uint32_t>(max_size));
- CHECK_AS_EXPECTED(nullptr != items_dequeued_sema, HAILO_OUT_OF_HOST_MEMORY, "Failed creating items_dequeued_sema semaphore");
-
- return SpscQueue(max_size, items_enqueued_sema, items_dequeued_sema, shutdown_event, default_timeout);
- }
-
- static std::shared_ptr<SpscQueue> create_shared(size_t max_size, const EventPtr& shutdown_event,
- std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
- {
- auto queue = create(max_size, shutdown_event, default_timeout);
- if (!queue) {
- LOGGER__ERROR("Failed creating queue. status={}", queue.status());
- return nullptr;
- }
-
- return make_shared_nothrow<SpscQueue>(queue.release());
- }
-
- static std::unique_ptr<SpscQueue> create_unique(size_t max_size, const EventPtr& shutdown_event,
- std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
- {
- auto queue = create(max_size, shutdown_event, default_timeout);
- if (!queue) {
- LOGGER__ERROR("Failed creating queue. status={}", queue.status());
- return nullptr;
- }
-
- return make_unique_nothrow<SpscQueue>(queue.release());
- }
-
- Expected<T> dequeue(std::chrono::milliseconds timeout, bool ignore_shutdown_event = false) AE_NO_TSAN
- {
- hailo_status wait_result = HAILO_UNINITIALIZED;
- if (ignore_shutdown_event) {
- wait_result = m_items_enqueued_sema->wait(timeout);
- } else {
- wait_result = m_items_enqueued_sema_or_shutdown.wait(timeout);
- }
-
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
- LOGGER__TRACE("Shutdown event has been signaled");
- return make_unexpected(wait_result);
- }
- if (HAILO_TIMEOUT == wait_result) {
- LOGGER__TRACE("Timeout, the queue is empty");
- return make_unexpected(wait_result);
- }
- if (HAILO_SUCCESS != wait_result) {
- LOGGER__WARNING("m_items_enqueued_sema received an unexpected failure");
- return make_unexpected(wait_result);
- }
-
- // The queue isn't empty
- T result{};
- const bool success = m_inner.try_dequeue(result);
- assert(success);
- AE_UNUSED(success);
-
- {
- std::unique_lock<std::mutex> lock(m_callback_mutex);
- if ((m_size == m_enqueues_count) && m_can_enqueue_callback) {
- m_can_enqueue_callback();
- }
- m_enqueues_count--;
- }
-
- const auto signal_result = m_items_dequeued_sema_or_shutdown.signal();
- if (HAILO_SUCCESS != signal_result) {
- return make_unexpected(signal_result);
- }
- return result;
- }
-
- Expected<T> dequeue() AE_NO_TSAN
- {
- return dequeue(m_default_timeout);
- }
-
- hailo_status enqueue(const T& result, std::chrono::milliseconds timeout) AE_NO_TSAN
- {
- const auto wait_result = m_items_dequeued_sema_or_shutdown.wait(timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
- LOGGER__TRACE("Shutdown event has been signaled");
- return wait_result;
- }
- if (HAILO_TIMEOUT == wait_result) {
- LOGGER__TRACE("Timeout, the queue is full");
- return wait_result;
- }
- if (HAILO_SUCCESS != wait_result) {
- LOGGER__WARNING("m_items_dequeued_sema received an unexpected failure");
- return wait_result;
- }
-
- // The queue isn't full
- const bool success = m_inner.try_enqueue(result);
- assert(success);
- AE_UNUSED(success);
-
- {
- std::unique_lock<std::mutex> lock(m_callback_mutex);
- m_enqueues_count++;
- if ((m_size == m_enqueues_count) && m_cant_enqueue_callback) {
- m_cant_enqueue_callback();
- }
- }
-
- return m_items_enqueued_sema_or_shutdown.signal();
- }
-
- inline hailo_status enqueue(const T& result) AE_NO_TSAN
- {
- return enqueue(result, m_default_timeout);
- }
-
- // TODO: Do away with two copies of this function? (SDK-16481)
- hailo_status enqueue(T&& result, std::chrono::milliseconds timeout, bool ignore_shutdown_event = false) AE_NO_TSAN
- {
- hailo_status wait_result = HAILO_UNINITIALIZED;
- if (ignore_shutdown_event) {
- wait_result = m_items_dequeued_sema->wait(timeout);
- } else {
- wait_result = m_items_dequeued_sema_or_shutdown.wait(timeout);
- }
-
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
- LOGGER__TRACE("Shutdown event has been signaled");
- return wait_result;
- }
- if (HAILO_TIMEOUT == wait_result) {
- LOGGER__TRACE("Timeout, the queue is full");
- return wait_result;
- }
- if (HAILO_SUCCESS != wait_result) {
- LOGGER__WARNING("m_items_dequeued_sema received an unexpected failure");
- return wait_result;
- }
-
- // The queue isn't full
- const bool success = m_inner.try_enqueue(std::move(result));
- assert(success);
- AE_UNUSED(success);
-
- {
- std::unique_lock<std::mutex> lock(m_callback_mutex);
- m_enqueues_count++;
- if ((m_size == m_enqueues_count) && m_cant_enqueue_callback) {
- m_cant_enqueue_callback();
- }
- }
-
- return m_items_enqueued_sema_or_shutdown.signal();
- }
-
- // TODO: HRT-3810, remove hacky argument ignore_shutdown_event
- inline hailo_status enqueue(T&& result, bool ignore_shutdown_event = false) AE_NO_TSAN
- {
- return enqueue(std::move(result), m_default_timeout, ignore_shutdown_event);
- }
-
- size_t size_approx()
- {
- return m_inner.size_approx();
- }
-
- hailo_status clear() AE_NO_TSAN
- {
- auto status = HAILO_SUCCESS;
- while (HAILO_SUCCESS == status) {
- auto output = dequeue(std::chrono::milliseconds(0), true);
- status = output.status();
- }
-
- if (HAILO_TIMEOUT == status) {
- return HAILO_SUCCESS;
- }
- return status;
- }
-
- void set_on_cant_enqueue_callback(std::function<void()> callback)
- {
- m_cant_enqueue_callback = callback;
- }
-
- void set_on_can_enqueue_callback(std::function<void()> callback)
- {
- m_can_enqueue_callback = callback;
- }
-
-private:
- ReaderWriterQueue m_inner;
- WaitOrShutdown m_items_enqueued_sema_or_shutdown;
- SemaphorePtr m_items_enqueued_sema;
- WaitOrShutdown m_items_dequeued_sema_or_shutdown;
- SemaphorePtr m_items_dequeued_sema;
- std::chrono::milliseconds m_default_timeout;
-
- const size_t m_size;
- std::atomic_uint32_t m_enqueues_count;
- std::function<void()> m_cant_enqueue_callback;
- std::function<void()> m_can_enqueue_callback;
- std::mutex m_callback_mutex;
-};
-
-} /* namespace hailort */
-
-#endif // HAILO_THREAD_SAFE_QUEUE_HPP_
+++ /dev/null
-// Note:
-// * This module is taken from Facebook's open source Folly library: https://github.com/facebook/folly (v2020.08.17.00)
-// * Changes:
-// * Changes made to the module are delimited with "BEGIN/END HAILO CHANGES"
-// * The file has been renamed from "TokenBucket.h" to "token_bucket.hpp"
-// * Removed:
-// * folly namespace
-// * BasicTokenBucket
-// * From BasicDynamicTokenBucket:
-// * Copy ctor and assignment operator
-// * available()
-// * reset()
-// * Original file: https://github.com/facebook/folly/blob/v2020.08.17.00/folly/TokenBucket.h
-// * Copyright notices follow.
-
-/*
- * Copyright (c) Facebook, Inc. and its affiliates.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TOKEN_BUCKET_HPP_
-#define TOKEN_BUCKET_HPP_
-
-#include <algorithm>
-#include <atomic>
-#include <chrono>
-#include <thread>
-
-// BEGIN HAILO CHANGES
-#include <hailo/hailort.h>
-#include "hailo/expected.hpp"
-#include "os/microsec_timer.hpp"
-
-namespace hailort
-{
-// END HAILO CHANGES
-
-/**
- * Thread-safe (atomic) token bucket implementation.
- *
- * A token bucket (http://en.wikipedia.org/wiki/Token_bucket) models a stream
- * of events with an average rate and some amount of burstiness. The canonical
- * example is a packet switched network: the network can accept some number of
- * bytes per second and the bytes come in finite packets (bursts). A token
- * bucket stores up to a fixed number of tokens (the burst size). Some number
- * of tokens are removed when an event occurs. The tokens are replenished at a
- * fixed rate. Failure to allocate tokens implies resource is unavailable and
- * caller needs to implement its own retry mechanism. For simple cases where
- * caller is okay with a FIFO starvation-free scheduling behavior, there are
- * also APIs to 'borrow' from the future effectively assigning a start time to
- * the caller when it should proceed with using the resource. It is also
- * possible to 'return' previously allocated tokens to make them available to
- * other users. Returns in excess of burstSize are considered expired and
- * will not be available to later callers.
- *
- * This implementation records the last time it was updated. This allows the
- * token bucket to add tokens "just in time" when tokens are requested.
- *
- * The "dynamic" base variant allows the token generation rate and maximum
- * burst size to change with every token consumption.
- *
- * @tparam Clock Clock type, must be steady i.e. monotonic.
- */
-template <typename Clock = std::chrono::steady_clock>
-class BasicDynamicTokenBucket {
- static_assert(Clock::is_steady, "clock must be steady");
-
- public:
- /**
- * Constructor.
- *
- * @param zeroTime Initial time at which to consider the token bucket
- * starting to fill. Defaults to 0, so by default token
- * buckets are "full" after construction.
- */
- explicit BasicDynamicTokenBucket(double zeroTime = 0) noexcept
- : zeroTime_(zeroTime) {}
-
- BasicDynamicTokenBucket(const BasicDynamicTokenBucket&) = delete;
- BasicDynamicTokenBucket& operator=(const BasicDynamicTokenBucket&) = delete;
-
- // BEGIN HAILO CHANGES
- BasicDynamicTokenBucket(BasicDynamicTokenBucket&& other) :
- zeroTime_(other.zeroTime_.load())
- {}
- // END HAILO CHANGES
-
-
- /**
- * Returns the current time in seconds since Epoch.
- */
- static double defaultClockNow() noexcept {
- auto const now = Clock::now().time_since_epoch();
- return std::chrono::duration<double>(now).count();
- }
-
- /**
- * Attempts to consume some number of tokens. Tokens are first added to the
- * bucket based on the time elapsed since the last attempt to consume tokens.
- * Note: Attempts to consume more tokens than the burst size will always
- * fail.
- *
- * Thread-safe.
- *
- * @param toConsume The number of tokens to consume.
- * @param rate Number of tokens to generate per second.
- * @param burstSize Maximum burst size. Must be greater than 0.
- * @param nowInSeconds Current time in seconds. Should be monotonically
- * increasing from the nowInSeconds specified in
- * this token bucket's constructor.
- * @return True if the rate limit check passed, false otherwise.
- */
- bool consume(
- double toConsume,
- double rate,
- double burstSize,
- double nowInSeconds = defaultClockNow()) {
- assert(rate > 0);
- assert(burstSize > 0);
-
- if (nowInSeconds <= zeroTime_.load()) {
- return 0;
- }
-
- return consumeImpl(
- rate, burstSize, nowInSeconds, [toConsume](double& tokens) {
- if (tokens < toConsume) {
- return false;
- }
- tokens -= toConsume;
- return true;
- });
- }
-
- /**
- * Similar to consume, but always consumes some number of tokens. If the
- * bucket contains enough tokens - consumes toConsume tokens. Otherwise the
- * bucket is drained.
- *
- * Thread-safe.
- *
- * @param toConsume The number of tokens to consume.
- * @param rate Number of tokens to generate per second.
- * @param burstSize Maximum burst size. Must be greater than 0.
- * @param nowInSeconds Current time in seconds. Should be monotonically
- * increasing from the nowInSeconds specified in
- * this token bucket's constructor.
- * @return number of tokens that were consumed.
- */
- double consumeOrDrain(
- double toConsume,
- double rate,
- double burstSize,
- double nowInSeconds = defaultClockNow()) {
- assert(rate > 0);
- assert(burstSize > 0);
-
- if (nowInSeconds <= zeroTime_.load()) {
- return 0;
- }
-
- double consumed;
- consumeImpl(
- rate, burstSize, nowInSeconds, [&consumed, toConsume](double& tokens) {
- if (tokens < toConsume) {
- consumed = tokens;
- tokens = 0.0;
- } else {
- consumed = toConsume;
- tokens -= toConsume;
- }
- return true;
- });
- return consumed;
- }
-
- /**
- * Return extra tokens back to the bucket. This will move the zeroTime_
- * value back based on the rate.
- *
- * Thread-safe.
- */
- void returnTokens(double tokensToReturn, double rate) {
- assert(rate > 0);
- assert(tokensToReturn > 0);
-
- returnTokensImpl(tokensToReturn, rate);
- }
-
- // BEGIN HAILO CHANGES
- /**
- * Like consumeOrDrain but the call will always satisfy the asked for count.
- * It does so by borrowing tokens from the future (zeroTime_ will move
- * forward) if the currently available count isn't sufficient.
- *
- * Returns a Expected<double>. The Expected wont be set if the request
- * cannot be satisfied: only case is when it is larger than burstSize. The
- * value of the Expected is a double indicating the time in seconds that the
- * caller needs to wait at which the reservation becomes valid. The caller
- * could simply sleep for the returned duration to smooth out the allocation
- * to match the rate limiter or do some other computation in the meantime. In
- * any case, any regular consume or consumeOrDrain calls will fail to allocate
- * any tokens until the future time is reached.
- *
- * Note: It is assumed the caller will not ask for a very large count nor use
- * it immediately (if not waiting inline) as that would break the burst
- * prevention the limiter is meant to be used for.
- *
- * Thread-safe.
- */
- Expected<double> consumeWithBorrowNonBlocking(
- double toConsume,
- double rate,
- double burstSize,
- double nowInSeconds = defaultClockNow()) {
- assert(rate > 0);
- assert(burstSize > 0);
-
- if (burstSize < toConsume) {
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
-
- while (toConsume > 0) {
- double consumed =
- consumeOrDrain(toConsume, rate, burstSize, nowInSeconds);
- if (consumed > 0) {
- toConsume -= consumed;
- } else {
- double zeroTimeNew = returnTokensImpl(-toConsume, rate);
- double napTime = std::max(0.0, zeroTimeNew - nowInSeconds);
- return napTime;
- }
- }
- return 0;
- }
-
- /**
- * Convenience wrapper around non-blocking borrow to sleep inline until
- * reservation is valid.
- */
- bool consumeWithBorrowAndWait(
- double toConsume,
- double rate,
- double burstSize,
- double nowInSeconds = defaultClockNow()) {
- auto res = consumeWithBorrowNonBlocking(toConsume, rate, burstSize, nowInSeconds);
- if (!res.has_value()) {
- return false;
- }
- if (res.value() > 0) {
- MicrosecTimer::sleep(static_cast<uint64_t>(res.value() * 1000000));
- }
- return true;
- }
- // END HAILO CHANGES
-
- private:
- template <typename TCallback>
- bool consumeImpl(
- double rate,
- double burstSize,
- double nowInSeconds,
- const TCallback& callback) {
- auto zeroTimeOld = zeroTime_.load();
- double zeroTimeNew;
- do {
- auto tokens = std::min((nowInSeconds - zeroTimeOld) * rate, burstSize);
- if (!callback(tokens)) {
- return false;
- }
- zeroTimeNew = nowInSeconds - tokens / rate;
- } while (!zeroTime_.compare_exchange_weak(zeroTimeOld, zeroTimeNew));
-
- return true;
- }
-
- /**
- * Adjust zeroTime based on rate and tokenCount and return the new value of
- * zeroTime_. Note: Token count can be negative to move the zeroTime_ value
- * into the future.
- */
- double returnTokensImpl(double tokenCount, double rate) {
- auto zeroTimeOld = zeroTime_.load();
- double zeroTimeNew;
- do {
- zeroTimeNew = zeroTimeOld - tokenCount / rate;
- } while (!zeroTime_.compare_exchange_weak(zeroTimeOld, zeroTimeNew));
- return zeroTimeNew;
- }
-
- std::atomic<double> zeroTime_;
-};
-
-using DynamicTokenBucket = BasicDynamicTokenBucket<>;
-
-} /* namespace hailort */
-
-#endif /* TOKEN_BUCKET_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file tracer.cpp
- * @brief: Tracing mechanism for HailoRT + FW events
- *
- **/
-
-
-#include "tracer.hpp"
-#include "common/utils.hpp"
-#include "hailort_logger.hpp"
-
-#include <spdlog/sinks/rotating_file_sink.h>
-#include <spdlog/sinks/stdout_color_sinks.h>
-#include <spdlog/sinks/android_sink.h>
-#include <spdlog/sinks/null_sink.h>
-
-#include <iomanip>
-#include <sstream>
-
-#define SCHEDULER_PROFILER_NAME ("SchedulerProfiler")
-#define SCHEDULER_PROFILER_LOGGER_FILENAME ("scheduler_profiler.json")
-#define SCHEDULER_PROFILER_LOGGER_PATTERN ("%v")
-
-#define SCHEDULER_PROFILER_LOGGER_PATH ("SCHEDULER_PROFILER_LOGGER_PATH")
-
-#define PROFILER_ENV_VAR ("HAILO_ENABLE_PROFILER")
-
-namespace hailort
-{
-
-Tracer::Tracer()
-{
- auto should_trace_env = std::getenv(PROFILER_ENV_VAR);
- m_should_trace = ((nullptr != should_trace_env) && (strnlen(should_trace_env, 2) == 1) && (strncmp(should_trace_env, "1", 1) == 0));
- if (m_should_trace) {
- m_start_time = std::chrono::high_resolution_clock::now();
- int64_t time_since_epoch = std::chrono::duration_cast<std::chrono::milliseconds>(m_start_time.time_since_epoch()).count();
- m_handlers.push_back(std::make_unique<SchedulerProfilerHandler>(time_since_epoch));
- }
-}
-
-SchedulerProfilerHandler::SchedulerProfilerHandler(int64_t &start_time)
-#ifndef __ANDROID__
- : m_file_sink(HailoRTLogger::create_file_sink(HailoRTLogger::get_log_path(SCHEDULER_PROFILER_LOGGER_PATH), SCHEDULER_PROFILER_LOGGER_FILENAME, false)),
- m_first_write(true)
-#endif
-{
-#ifndef __ANDROID__
- spdlog::sinks_init_list sink_list = { m_file_sink };
- m_profiler_logger = make_shared_nothrow<spdlog::logger>(SCHEDULER_PROFILER_NAME, sink_list.begin(), sink_list.end());
- m_file_sink->set_level(spdlog::level::level_enum::info);
- m_file_sink->set_pattern(SCHEDULER_PROFILER_LOGGER_PATTERN);
- std::stringstream ss;
- ss << "{\"ms_since_epoch_zero_time\": \"" << start_time << "\",\n\"scheduler_actions\": [\n";
- m_profiler_logger->info(ss.str());
-#else
- (void)start_time;
-#endif
-}
-
-SchedulerProfilerHandler::~SchedulerProfilerHandler()
-{
- m_profiler_logger->info("]\n}");
-}
-
-struct JSON
-{
- std::unordered_map<std::string, std::string> members;
- JSON(const std::initializer_list<std::pair<const std::string, std::string>> &dict) : members{dict} {}
- JSON(const std::unordered_map<std::string, uint32_t> &dict) {
- for (auto &pair : dict) {
- members.insert({pair.first, std::to_string(pair.second)});
- }
- }
-};
-
-template<class T>
-std::string json_to_string(const T &val) {
- return std::to_string(val);
-}
-
-template<>
-std::string json_to_string(const std::string &val) {
- std::ostringstream os;
- os << std::quoted(val);
- return os.str();
-}
-
-template<>
-std::string json_to_string(const bool &bool_val) {
- return bool_val ? "true" : "false";
-}
-
-template<>
-std::string json_to_string(const JSON &json_val) {
- std::ostringstream os;
- os << "{\n";
- size_t i = 0;
- for (const auto &kv : json_val.members) {
- ++i;
- os << std::quoted(kv.first) << " : ";
- os << kv.second;
- if (i != json_val.members.size()) {
- os << ",\n";
- }
- }
- os << "\n}";
- return os.str();
-}
-
-bool SchedulerProfilerHandler::comma()
-{
- auto result = !m_first_write;
- m_first_write = false;
- return result;
-}
-
-void SchedulerProfilerHandler::log(JSON json)
-{
- m_profiler_logger->info("{}{}", comma() ? ",\n" : "", json_to_string(json));
-}
-
-void SchedulerProfilerHandler::handle_trace(const AddNetworkGroupTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_name", json_to_string(trace.network_group_name)},
- {"network_group_handle", json_to_string(trace.network_group_handle)},
- {"timeout", json_to_string((uint64_t)trace.timeout)},
- {"threshold", json_to_string((uint64_t)trace.threshold)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const CreateNetworkGroupInputStreamsTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_name", json_to_string(trace.network_group_name)},
- {"stream_name", json_to_string(trace.stream_name)},
- {"queue_size", json_to_string(trace.queue_size)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const CreateNetworkGroupOutputStreamsTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_name", json_to_string(trace.network_group_name)},
- {"stream_name", json_to_string(trace.stream_name)},
- {"queue_size", json_to_string(trace.queue_size)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const WriteFrameTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_handle", json_to_string(trace.network_group_handle)},
- {"queue_name", json_to_string(trace.queue_name)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const InputVdmaEnqueueTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_handle", json_to_string(trace.network_group_handle)},
- {"queue_name", json_to_string(trace.queue_name)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const ReadFrameTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_handle", json_to_string(trace.network_group_handle)},
- {"queue_name", json_to_string(trace.queue_name)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const OutputVdmaEnqueueTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_handle", json_to_string(trace.network_group_handle)},
- {"queue_name", json_to_string(trace.queue_name)},
- {"frames", json_to_string(trace.frames)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const ChooseNetworkGroupTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"chosen_network_group_handle", json_to_string(trace.network_group_handle)},
- {"threshold", json_to_string(trace.threshold)},
- {"timeout", json_to_string(trace.timeout)}
- }));
-}
-
-void SchedulerProfilerHandler::handle_trace(const SwitchNetworkGroupTrace &trace)
-{
- log(JSON({
- {"action", json_to_string(trace.name)},
- {"timestamp", json_to_string(trace.timestamp)},
- {"device_id", json_to_string(trace.device_id)},
- {"network_group_handle", json_to_string(trace.network_group_handle)}
- }));
-}
-
-
-}
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file tracer.hpp
- * @brief Tracing mechanism for HailoRT + FW events
- **/
-
-#ifndef _HAILO_TRACER_HPP_
-#define _HAILO_TRACER_HPP_
-
-#include "hailo/hailort.h"
-#include "common/logger_macros.hpp"
-#include "network_group_scheduler.hpp"
-
-#include <chrono>
-#include <memory>
-#include <vector>
-#include <map>
-#include <unordered_map>
-#include <atomic>
-#include <chrono>
-#include <sstream>
-#include <iomanip>
-
-
-namespace hailort
-{
-
-struct Trace
-{
- Trace(const std::string &name)
- : name(name)
- {}
-
- virtual ~Trace() = default;
-
- uint64_t timestamp = 0;
- std::string name;
-};
-
-struct InitTrace : Trace
-{
- InitTrace() : Trace("init") {}
-};
-
-struct AddNetworkGroupTrace : Trace
-{
- AddNetworkGroupTrace(const std::string &device_id, const std::string &network_group_name, uint64_t timeout, uint32_t threshold, scheduler_ng_handle_t handle)
- : Trace("add_network_group"), device_id(device_id), network_group_name(network_group_name), timeout(timeout), threshold(threshold), network_group_handle(handle)
- {}
-
- std::string device_id;
- std::string network_group_name;
- uint64_t timeout = 0;
- uint32_t threshold = 0;
- scheduler_ng_handle_t network_group_handle = INVALID_NETWORK_GROUP_HANDLE;
-};
-
-struct CreateNetworkGroupInputStreamsTrace : Trace
-{
- CreateNetworkGroupInputStreamsTrace(const std::string &device_id, const std::string &network_group_name, const std::string &stream_name, uint32_t queue_size)
- : Trace("create_input_stream"), device_id(device_id), network_group_name(network_group_name), stream_name(stream_name), queue_size(queue_size)
- {}
-
- std::string device_id;
- std::string network_group_name;
- std::string stream_name;
- uint32_t queue_size;
-};
-
-struct CreateNetworkGroupOutputStreamsTrace : Trace
-{
- CreateNetworkGroupOutputStreamsTrace(const std::string &device_id, const std::string &network_group_name, const std::string &stream_name, uint32_t queue_size)
- : Trace("create_output_stream"), device_id(device_id), network_group_name(network_group_name), stream_name(stream_name), queue_size(queue_size)
- {}
-
- std::string device_id;
- std::string network_group_name;
- std::string stream_name;
- uint32_t queue_size;
-};
-
-struct WriteFrameTrace : Trace
-{
- WriteFrameTrace(const std::string &device_id, scheduler_ng_handle_t network_group_handle, const std::string &queue_name)
- : Trace("wrte_frame"), device_id(device_id), network_group_handle(network_group_handle), queue_name(queue_name)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
- std::string queue_name;
-};
-
-struct InputVdmaEnqueueTrace : Trace
-{
- InputVdmaEnqueueTrace(const std::string &device_id, scheduler_ng_handle_t network_group_handle, const std::string &queue_name)
- : Trace("input_vdma_enqueue"), device_id(device_id), network_group_handle(network_group_handle), queue_name(queue_name)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
- std::string queue_name;
-};
-
-struct ReadFrameTrace : Trace
-{
- ReadFrameTrace(const std::string &device_id, scheduler_ng_handle_t network_group_handle, const std::string &queue_name)
- : Trace("read_frame"), device_id(device_id), network_group_handle(network_group_handle), queue_name(queue_name)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
- std::string queue_name;
-};
-
-struct OutputVdmaEnqueueTrace : Trace
-{
- OutputVdmaEnqueueTrace(const std::string &device_id, scheduler_ng_handle_t network_group_handle, const std::string &queue_name, uint32_t frames)
- : Trace("output_vdma_enqueue"), device_id(device_id), network_group_handle(network_group_handle), queue_name(queue_name), frames(frames)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
- std::string queue_name;
- uint32_t frames = 0;
-};
-
-struct ChooseNetworkGroupTrace : Trace
-{
- ChooseNetworkGroupTrace(const std::string &device_id, scheduler_ng_handle_t handle, bool threshold, bool timeout)
- : Trace("choose_network_group"), device_id(device_id), network_group_handle(handle), threshold(threshold), timeout(timeout)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
- bool threshold = false;
- bool timeout = false;
-};
-
-struct SwitchNetworkGroupTrace : Trace
-{
- SwitchNetworkGroupTrace(const std::string &device_id, scheduler_ng_handle_t handle)
- : Trace("switch_network_group"), device_id(device_id), network_group_handle(handle)
- {}
-
- std::string device_id;
- scheduler_ng_handle_t network_group_handle;
-};
-
-class Handler
-{
-public:
- virtual ~Handler() = default;
-
- virtual void handle_trace(const InitTrace&) {};
- virtual void handle_trace(const AddNetworkGroupTrace&) {};
- virtual void handle_trace(const CreateNetworkGroupInputStreamsTrace&) {};
- virtual void handle_trace(const CreateNetworkGroupOutputStreamsTrace&) {};
- virtual void handle_trace(const WriteFrameTrace&) {};
- virtual void handle_trace(const InputVdmaEnqueueTrace&) {};
- virtual void handle_trace(const ReadFrameTrace&) {};
- virtual void handle_trace(const OutputVdmaEnqueueTrace&) {};
- virtual void handle_trace(const ChooseNetworkGroupTrace&) {};
- virtual void handle_trace(const SwitchNetworkGroupTrace&) {};
-};
-
-struct JSON;
-
-class SchedulerProfilerHandler : public Handler
-{
-public:
- SchedulerProfilerHandler(SchedulerProfilerHandler const&) = delete;
- void operator=(SchedulerProfilerHandler const&) = delete;
-
- SchedulerProfilerHandler(int64_t &start_time);
- ~SchedulerProfilerHandler();
-
- virtual void handle_trace(const AddNetworkGroupTrace&) override;
- virtual void handle_trace(const CreateNetworkGroupInputStreamsTrace&) override;
- virtual void handle_trace(const CreateNetworkGroupOutputStreamsTrace&) override;
- virtual void handle_trace(const WriteFrameTrace&) override;
- virtual void handle_trace(const InputVdmaEnqueueTrace&) override;
- virtual void handle_trace(const ReadFrameTrace&) override;
- virtual void handle_trace(const OutputVdmaEnqueueTrace&) override;
- virtual void handle_trace(const ChooseNetworkGroupTrace&) override;
- virtual void handle_trace(const SwitchNetworkGroupTrace&) override;
-
-private:
- void log(JSON json);
- bool comma();
-
- std::shared_ptr<spdlog::sinks::sink> m_file_sink;
- std::shared_ptr<spdlog::logger> m_profiler_logger;
- std::atomic<bool> m_first_write;
-};
-
-class Tracer
-{
-public:
- template<class TraceType, typename... Args>
- static void trace(Args... trace_args)
- {
- auto &tracer = get_instance();
- tracer.execute_trace<TraceType>(trace_args...);
- }
-
-private:
- Tracer();
-
- static Tracer& get_instance()
- {
- static Tracer tracer;
- return tracer;
- }
-
- template<class TraceType, typename... Args>
- void execute_trace(Args... trace_args)
- {
- if (!m_should_trace) {
- return;
- }
-
- TraceType trace_struct(trace_args...);
- auto curr_time = std::chrono::high_resolution_clock::now();
- trace_struct.timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(curr_time - this->m_start_time).count();
- for (auto &handler : this->m_handlers) {
- handler->handle_trace(trace_struct);
- }
- }
-
- bool m_should_trace = false;
- std::chrono::high_resolution_clock::time_point m_start_time;
- std::vector<std::unique_ptr<Handler>> m_handlers;
-};
-
-}
-
-#endif
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file tracer_macros.hpp
- * @brief Macros for tracing mechanism for HailoRT + FW events
- **/
-
-#ifndef _HAILO_TRACER_MACROS_HPP_
-#define _HAILO_TRACER_MACROS_HPP_
-
-#if defined HAILO_ENABLE_PROFILER_BUILD
-#include "tracer.hpp"
-#endif
-
-namespace hailort
-{
-
-struct VoidAll {
- template<typename... Args> VoidAll(Args const& ...) {}
-};
-
-#if defined HAILO_ENABLE_PROFILER_BUILD
-#define TRACE(type, ...) (Tracer::trace<type>(__VA_ARGS__))
-#else
-#define TRACE(type, ...) {VoidAll temporary_name{__VA_ARGS__};}
-#endif
-
-}
-
-#endif // _HAILO_TRACER_MACROS_HPP_
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file transform.cpp
- * @brief Implements transform module
- **/
-#include "hailo/transform.hpp"
-#include "hailo/hailort.h"
-#include "hailo/stream.hpp"
-#include "hailo/expected.hpp"
-#include "hailo/hailort_common.hpp"
-#include "hailo/quantization.hpp"
-#include "hailort_defaults.hpp"
-#include "common/compiler_extensions_compat.hpp"
-#include "common/logger_macros.hpp"
-#include "common/utils.hpp"
-#include "transform_internal.hpp"
-
-#include <type_traits>
-#include <sstream>
-
-namespace hailort
-{
-
-#define HW_DATA_ALIGNMENT (8)
-#define RGB_FEATURES (3)
-
-
-bool TransformContextUtils::should_quantize(const hailo_stream_direction_t stream_direction,
- const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
-{
- if (HAILO_H2D_STREAM == stream_direction) {
- return (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) &&
- (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags) &&
- !((Quantization::is_identity_qp(quant_info)) && (src_format.type == dst_format.type)));
- } else {
- return (HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) &&
- !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags);
- }
-}
-
-bool TransformContextUtils::should_transpose(const hailo_format_flags_t &src_flags, const hailo_format_flags_t &dst_flags)
-{
- return ((HAILO_FORMAT_FLAGS_TRANSPOSED & src_flags) != (HAILO_FORMAT_FLAGS_TRANSPOSED & dst_flags));
-}
-
-bool TransformContextUtils::should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format)
-{
-
- /* If shapes and format are different - need to use transform_context */
- if (!((src_image_shape.features == dst_image_shape.features) &&
- (src_image_shape.height == dst_image_shape.height) &&
- (src_image_shape.width == dst_image_shape.width) &&
- (src_format.order == dst_format.order) &&
- (src_format.type == dst_format.type))) {
- return true;
- }
-
- /* Some orders has to be reordered, even if shapes and types are the same
- Note: In order to add new order to the list - add test to test_transform with all shapes and types same
- pre and post transform */
- switch (src_format.order) {
- case HAILO_FORMAT_ORDER_NHWC:
- case HAILO_FORMAT_ORDER_NHCW:
- case HAILO_FORMAT_ORDER_NC:
- case HAILO_FORMAT_ORDER_NHW:
- case HAILO_FORMAT_ORDER_FCR:
- case HAILO_FORMAT_ORDER_BAYER_RGB:
- case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
- case HAILO_FORMAT_ORDER_YUY2:
- return false;
- case HAILO_FORMAT_ORDER_F8CR:
- case HAILO_FORMAT_ORDER_HAILO_NMS:
- case HAILO_FORMAT_ORDER_RGB888:
- case HAILO_FORMAT_ORDER_NCHW:
- case HAILO_FORMAT_ORDER_NV12:
- case HAILO_FORMAT_ORDER_NV21:
- return true;
- default:
- LOGGER__WARN("Hailo Internal warning - Unrecognised order. Transformation optimization would not be activated");
- /* In case user asks to add new order - please add this order to one of the true or false lists */
- assert(false);
- return true;
- }
-}
-
-bool TransformContextUtils::is_transformation_required(const hailo_stream_direction_t stream_direction,
- const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
-{
- /* This function should be called after auto expend function */
- assert((HAILO_FORMAT_ORDER_AUTO != src_format.order) && (HAILO_FORMAT_ORDER_AUTO != dst_format.order));
- assert((HAILO_FORMAT_TYPE_AUTO != src_format.type) && (HAILO_FORMAT_TYPE_AUTO != dst_format.type));
-
- return (should_quantize(stream_direction, src_format, dst_format, quant_info) ||
- should_transpose(src_format.flags, dst_format.flags) ||
- should_reorder(src_image_shape, src_format, dst_image_shape, dst_format));
-}
-
-std::string TransformContextUtils::make_quantization_description(hailo_format_type_t src_type,
- hailo_format_type_t dst_type, hailo_quant_info_t quant_info)
-{
- std::stringstream quant_description;
- quant_description << "Quantization - src_type: " << HailoRTCommon::get_format_type_str(src_type) <<
- ", dst_type " << HailoRTCommon::get_format_type_str(dst_type) <<
- ", qp_scale: " << quant_info.qp_scale <<
- ", qp_zp: " << quant_info.qp_zp <<
- ", limvals_min: " << quant_info.limvals_min <<
- ", limvals_max: " << quant_info.limvals_max;
-
- return quant_description.str();
-}
-
-std::string TransformContextUtils::make_reorder_description(hailo_format_order_t src_order, hailo_3d_image_shape_t src_shape,
- hailo_format_order_t dst_order, hailo_3d_image_shape_t dst_shape)
-{
- std::stringstream reorder_description;
- reorder_description << "Reorder - src_order: " << HailoRTCommon::get_format_order_str(src_order) << ", src_shape: (" <<
- src_shape.height << ", " << src_shape.width << ", " << src_shape.features << ")" <<
- ", dst_order: " << HailoRTCommon::get_format_order_str(dst_order) << ", dst_shape: (" <<
- dst_shape.height << ", " << dst_shape.width << ", " << dst_shape.features << ")";
-
- return reorder_description.str();
-}
-
-std::string TransformContextUtils::make_transpose_description(hailo_3d_image_shape_t src_shape, hailo_3d_image_shape_t transposed_shape)
-{
- std::stringstream transpose_description;
- transpose_description << "Transpose - src_shape: (" <<
- src_shape.height << ", " << src_shape.width << ", " << src_shape.features << ")" <<
- ", dst_shape: (" << transposed_shape.height << ", " << transposed_shape.width << ", " << transposed_shape.features << ")";
-
- return transpose_description.str();
-}
-
-template<typename T, typename Q>
-void cast_elements_inplace(T *dst_ptr, uint32_t frame_size)
-{
- static_assert(sizeof(T) >= sizeof(Q), "cast_elements_inplace() cannot cast to smaller size");
- for (int32_t i = (int32_t)frame_size - 1; i >= 0; i--) {
- dst_ptr[i] = (T)(*((Q*)dst_ptr + i));
- }
-}
-
-/* Transpose funcs */
-static hailo_3d_image_shape_t transposed_shape(const hailo_3d_image_shape_t &shape)
-{
- hailo_3d_image_shape_t transposed_shape = shape;
- std::swap(transposed_shape.height, transposed_shape.width);
- return transposed_shape;
-}
-
-static hailo_status transform__transpose_NHWC(const void *src_ptr, const hailo_3d_image_shape_t &shape,
- size_t feature_bytes_size, void *dst_ptr)
-{
- // Flatten the features, look at the data as HW matrix
- const size_t element_size = shape.features * feature_bytes_size;
- const uint8_t *src_matrix = reinterpret_cast<const uint8_t*>(src_ptr);
- uint8_t *dst_matrix = reinterpret_cast<uint8_t*>(dst_ptr);
- for (size_t r = 0; r < shape.height; r++) {
- for (size_t c = 0; c < shape.width; c++) {
- // dest[c][r] = src[r][c]
- size_t src_offset = element_size * ((r * shape.width) + c);
- const uint8_t *src_pos = src_matrix + src_offset;
-
- size_t dst_offset = element_size * ((c * shape.height) + r);
- uint8_t *dst_pos = dst_matrix + dst_offset;
-
- memcpy(dst_pos, src_pos, element_size);
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status transform__transpose_buffer(const void *src_ptr, const hailo_3d_image_shape_t &shape,
- const hailo_format_t &format, void *dst_ptr)
-{
- switch (format.order)
- {
- case HAILO_FORMAT_ORDER_NHWC:
- case HAILO_FORMAT_ORDER_NHW:
- case HAILO_FORMAT_ORDER_BAYER_RGB:
- case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
- case HAILO_FORMAT_ORDER_FCR:
- case HAILO_FORMAT_ORDER_F8CR:
- return transform__transpose_NHWC(src_ptr, shape, HailoRTCommon::get_format_data_bytes(format), dst_ptr);
- default:
- LOGGER__ERROR("Transpose is not supported for order {}", format.order);
- return HAILO_INVALID_OPERATION;
- }
-}
-
-hailo_status transpose_buffer(const MemoryView src, const hailo_3d_image_shape_t &shape,
- const hailo_format_t &format, MemoryView dst)
-{
- if ((src.size() != dst.size()) || (src.size() != HailoRTCommon::get_frame_size(shape, format))) {
- LOGGER__ERROR("transpose NHWC invalid buffers size");
- return HAILO_INVALID_ARGUMENT;
- }
-
- return transform__transpose_buffer(src.data(), shape, format, dst.data());
-}
-
-
-/* Re-Ordering funcs */
-template<typename T>
-void transform__h2d_NHWC_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- size_t src_offset = 0;
- size_t dst_offset = 0;
- uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
- uint32_t pad_size = (dst_image_shape->width - src_image_shape->width) * dst_image_shape->features;
-
- /* copy src to dst, and pad width to 8 elements */
- for (uint32_t r = 0; r < src_image_shape->height ; r++) {
- src_offset = r * src_image_shape->width * src_image_shape->features;
- dst_offset = r * dst_image_shape->width * dst_image_shape->features;
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_row_size * sizeof(T));
- memset(dst_ptr + dst_offset + src_row_size, 0, pad_size * sizeof(T));
- }
-}
-
-template<typename T>
-void transform__d2h_NHWC_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- size_t src_offset = 0;
- size_t dst_offset = 0;
-
- // copy and removed padded features
- for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
- for (uint32_t c = 0; c < dst_image_shape->width ; c++) {
- src_offset = r * src_image_shape->width * src_image_shape->features + c * src_image_shape->features;
- dst_offset = r * dst_image_shape->width * dst_image_shape->features + c * dst_image_shape->features;
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, dst_image_shape->features * sizeof(T));
- }
- }
-}
-
-template<typename T>
-void transform__h2d_NV12_to_NV12(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape, T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
- uint32_t rows_count = src_image_shape->height * src_image_shape->features;
- ASSERT(0 == fmod(rows_count, 1.5));
- ASSERT(0 == (src_image_shape->width % 2));
-
- auto row_leftover = dst_image_shape->width - src_image_shape->width;
-
- size_t src_offset_y = 0;
- size_t src_offset_uv = ((static_cast<uint32_t>(rows_count / 1.5)) * src_image_shape->width);
- size_t dst_offset = 0;
-
- for(uint32_t h = 0; h < (static_cast<uint32_t>(rows_count / 1.5)); h += 2) {
- /* Copy 2 rows of Y for each row of U,V */
- // Copy Y
- for (auto i = 0; i < 2; i++) {
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset_y, (src_image_shape->width * sizeof(T)));
- src_offset_y += (src_image_shape->width);
- dst_offset += (src_image_shape->width);
- memset((dst_ptr + dst_offset), 0, (row_leftover * sizeof(T)));
- dst_offset += row_leftover;
- }
-
- // Copy U, V
- memcpy(dst_ptr + dst_offset, (src_ptr + src_offset_uv), (src_image_shape->width * sizeof(T)));
- src_offset_uv += src_image_shape->width;
- dst_offset += src_image_shape->width;
- memset((dst_ptr + dst_offset), 0, (row_leftover * sizeof(T)));
- dst_offset += row_leftover;
- }
-}
-
-template<typename T>
-void transform__h2d_NHWC_to_NHCW(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
- uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
-
- size_t src_offset = 0;
- size_t dst_offset = 0;
- uint32_t pad_size = dst_image_shape->width - src_image_shape->width;
-
- /* transpose - switch width and channels */
- for (uint32_t r = 0; r < src_image_shape->height ; r++) {
- for (uint32_t f = 0; f < src_image_shape->features; f++) {
- for (uint32_t c = 0; c < src_image_shape->width; c++) {
- src_offset = r * src_row_size + c * src_image_shape->features + f;
- dst_offset = r * dst_row_size + f * dst_image_shape->width + c;
- dst_ptr[dst_offset] = src_ptr[src_offset];
- }
- /* pad width to 8 elemnts */
- if (pad_size != 0) {
- dst_offset = r * dst_row_size + f * dst_image_shape->width + src_image_shape->width;
- memset(dst_ptr + dst_offset, 0, pad_size);
- }
- }
- }
-}
-
-template<typename T>
-void transform__d2h_NHCW_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- /* transpose - switch channels and width, ignore padded elements */
- const auto row_size_src = src_image_shape->width * src_image_shape->features;
- const auto row_size_dest = dst_image_shape->width * dst_image_shape->features;
- for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
- const auto row_offset_src = r * row_size_src;
- const auto row_offset_dest = r * row_size_dest;
- for (uint32_t c = 0; c < dst_image_shape->width; c++) {
- const auto src_offset = row_offset_src + c;
- const auto dest_offset = row_offset_dest + c * dst_image_shape->features;
- for (uint32_t f = 0; f < dst_image_shape->features; f++) {
- dst_ptr[dest_offset + f] = src_ptr[src_offset + f * src_image_shape->width];
- }
- }
- }
-}
-
-template<typename T>
-void transform__d2h_NHW_to_NHW(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape, T *dst_ptr,
- hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- for (uint32_t row = 0; row < dst_image_shape->height; row++) {
- const T *src = src_ptr + (row * src_image_shape->width);
- T* dst = dst_ptr + row * dst_image_shape->width;
- std::copy_n(src, dst_image_shape->width, dst);
- }
-}
-
-template<typename T>
-void transform__h2d_NC_to_NC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- /* copy src to dst, and pad channels to 8 elements */
- memcpy(dst_ptr, src_ptr, src_image_shape->features * sizeof(T));
- memset(dst_ptr + src_image_shape->features, 0, (dst_image_shape->features - src_image_shape->features) * sizeof(T));
-}
-
-template<typename T>
-void transform__d2h_NC_to_NC(const T *src_ptr, T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- memcpy(dst_ptr, src_ptr, dst_image_shape->features * sizeof(T));
-}
-
-static inline void transform__parse_and_copy_bbox (hailo_bbox_t *dst, uint64_t* proposal)
-{
- dst->y_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000000) >> 36);
- dst->x_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000) >> 24);
- dst->y_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff000) >> 12);
- dst->x_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff));
- dst->score = (uint16_t)((*((uint64_t*)proposal) & 0xffff000000000000) >> 48);
-}
-
-void transform__d2h_NMS(const uint8_t *src_ptr, uint8_t *dst_ptr, const hailo_nms_info_t &nms_info, std::vector<size_t> &chunk_offsets)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- uint32_t num_of_classes = nms_info.number_of_classes;
- uint32_t bbox_size = nms_info.bbox_size;
-
- size_t bbox_index = 0;
- size_t src_offset = 0;
- size_t dst_offset = 0;
-
- nms_bbox_counter_t class_bboxes_count = 0;
-
- // For each class, we need to merge bboxes from all nms chunks. Therefore we use chunk_offsets - for
- // each nms chunk we store its offset, any time we finish parsing some class bboxes, we update the
- // offset
-
- // First, init the chunk_offset vector
- assert(chunk_offsets.size() == nms_info.chunks_per_frame);
- size_t current_offset = 0;
- chunk_offsets[0] = current_offset;
- for (size_t chunk_index = 1; chunk_index < nms_info.chunks_per_frame; chunk_index++) {
- // Skip all classes. Can be optimized if we store the size of each chunk in the begining of the buffer
- for (size_t class_index = 0; class_index < num_of_classes; class_index++) {
- class_bboxes_count = *(reinterpret_cast<const nms_bbox_counter_t*>(src_ptr + current_offset));
- current_offset += sizeof(nms_bbox_counter_t) + (class_bboxes_count * bbox_size);
- }
- chunk_offsets[chunk_index] = current_offset;
- }
-
- // Now, the merge itself
- for (size_t class_index = 0; class_index < num_of_classes; class_index++) {
- nms_bbox_counter_t *dst_bbox_counter = reinterpret_cast<nms_bbox_counter_t*>(dst_ptr + dst_offset);
- *dst_bbox_counter = 0;
-
- dst_offset += sizeof(nms_bbox_counter_t);
-
- for (size_t chunk_index = 0; chunk_index < nms_info.chunks_per_frame; chunk_index++) {
- // Add bbox from all chunks of current class
- src_offset = chunk_offsets[chunk_index];
- class_bboxes_count = *((nms_bbox_counter_t*)((uint8_t*)src_ptr + src_offset));
- *dst_bbox_counter = static_cast<nms_bbox_counter_t>(*dst_bbox_counter + class_bboxes_count);
-
- src_offset += sizeof(nms_bbox_counter_t);
-
- for (bbox_index = 0; bbox_index < class_bboxes_count; bbox_index++) {
- transform__parse_and_copy_bbox((hailo_bbox_t *)(dst_ptr + dst_offset), (uint64_t*)(src_ptr + src_offset));
- src_offset += bbox_size;
- dst_offset += sizeof(hailo_bbox_t);
- }
-
- chunk_offsets[chunk_index] = src_offset;
- }
- }
-}
-
-template<typename T>
-void transform__h2d_FCR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- size_t src_offset = 0;
- size_t dst_offset = 0;
- uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
- uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
- uint32_t pad_size = dst_image_shape->features - src_image_shape->features;
-
- for (uint32_t r = 0; r < src_image_shape->height ; r++) {
- for (uint32_t c = 0; c < src_image_shape->width; c++) {
- src_offset = r * src_row_size + c * src_image_shape->features;
- dst_offset = r * dst_row_size + c * dst_image_shape->features;
-
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_image_shape->features * sizeof(T));
- dst_offset += src_image_shape->features;
- memset(dst_ptr + dst_offset, 0, pad_size * sizeof(T));
- }
- }
-}
-
-template<typename T>
-void transform__h2d_F8CR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
- uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
- uint32_t src_features = src_image_shape->features;
- size_t src_offset = 0;
- size_t dst_offset = 0;
-
- /* copy src data to dst, 8channels * width at a time, pad features to 8 elemnts */
- for (uint32_t r = 0; r < src_image_shape->height ; r++) {
- for (uint32_t c = 0; c < src_image_shape->width; c++) {
- for (uint32_t f = 0; f < src_image_shape->features; f+=8) {
- src_offset = r * src_row_size + c * src_image_shape->features + f;
- dst_offset = r * dst_row_size + c * HW_DATA_ALIGNMENT + f * dst_image_shape->width;
- if (f + HW_DATA_ALIGNMENT <= src_image_shape->features) {
- /* take 8 full features for each column and write them */
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, HW_DATA_ALIGNMENT * sizeof(T));
- }
- else {
- /* take the last 8 or less features, pad features to 8 and write */
- auto last_features = (src_features % HW_DATA_ALIGNMENT);
- auto remainder = (HW_DATA_ALIGNMENT - last_features);
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, last_features * sizeof(T));
- dst_offset += last_features;
- memset(dst_ptr + dst_offset, 0, remainder * sizeof(T));
- }
- }
- }
- }
-}
-
-template<typename T>
-void transform__d2h_F8CR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
- uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
- uint32_t dst_features = dst_image_shape->features;
- uint32_t src_offset = 0;
- uint32_t dst_offset = 0;
-
- for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
- for (uint32_t c = 0; c < dst_image_shape->width; c++) {
- for (uint32_t f = 0; f < dst_image_shape->features; f+=8) {
- src_offset = r * src_row_size + c * HW_DATA_ALIGNMENT + f * src_image_shape->width;
- dst_offset = r * dst_row_size + c * dst_image_shape->features + f;
- if (f + HW_DATA_ALIGNMENT <= dst_image_shape->features) {
- /* copy the first dst_image_features (which are aligned to 8)! */
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, HW_DATA_ALIGNMENT * sizeof(T));
- }
- else {
- /* copy the last 8 or less features, remove pad */
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, (dst_features % HW_DATA_ALIGNMENT) * sizeof(T));
- }
- }
- }
- }
-}
-
-template<typename T>
-void transform__d2h_BAYER_RGB(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- uint32_t src_offset = 0;
- uint32_t dst_offset = 0;
-
- for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
- src_offset = r * src_image_shape->width;
- dst_offset = r * dst_image_shape->width;
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, dst_image_shape->width * sizeof(T));
- }
-}
-
-template<typename T>
-hailo_status transform__h2d_NHWC_to_RGB888(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- size_t src_offset = 0;
- size_t dst_offset = 0;
- uint32_t pad_size = (dst_image_shape->width - src_image_shape->width) * dst_image_shape->features;
-
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- CHECK(((RGB_FEATURES == src_image_shape->features) && ((RGB_FEATURES + 1) == dst_image_shape->features)),
- HAILO_INVALID_ARGUMENT,
- "User features must be {}, received {}. HW features must be {}, received {}",
- RGB_FEATURES, src_image_shape->features, RGB_FEATURES + 1, dst_image_shape->features);
-
- for (uint32_t r = 0; r < src_image_shape->height ; r++) {
- for (uint32_t c = 0; c < src_image_shape->width; c++) {
- src_offset = r * src_image_shape->width * src_image_shape->features + c * src_image_shape->features;
- dst_offset = r * dst_image_shape->width * dst_image_shape->features + c * dst_image_shape->features;
-
- /* Copy while flipping the data feature-wise */
- for (uint32_t f = 0; f < src_image_shape->features; f++) {
- dst_ptr[dst_offset + f] = src_ptr[src_offset + src_image_shape->features - f - 1];
- }
- /* add another zero byte */
- dst_ptr[dst_offset + RGB_FEATURES] = 0;
- }
- /* move dst_offset 4 features (RGB + 1 zero byte) and pad width if needed */
- memset(dst_ptr + dst_offset + RGB_FEATURES + 1, 0, pad_size * sizeof(T));
- }
-
- return HAILO_SUCCESS;
-}
-
-template<typename T>
-hailo_status transform__h2d_NCHW_to_NHCW(
- const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
- CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform features src/dst should be the same");
- CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform height src/dst should be the same");
- CHECK(src_image_shape->width <= dst_image_shape->width, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform src width should be smaller/equal than dst width");
- CHECK((dst_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform dst width must be aligned to {}", HW_DATA_ALIGNMENT);
-
- size_t width_size = src_image_shape->width;
- size_t pad_size = (dst_image_shape->width - src_image_shape->width);
- for (uint32_t c = 0; c < src_image_shape->features; c++) {
- for (uint32_t r = 0; r < src_image_shape->height; r++) {
- // Copy width
- const T *src = src_ptr +
- src_image_shape->width * src_image_shape->height * c +
- src_image_shape->width * r;
- T *dst = dst_ptr +
- dst_image_shape->features * dst_image_shape->width * r +
- dst_image_shape->width * c;
-
- std::copy_n(src, width_size, dst);
- if (pad_size != 0) {
- std::fill_n(dst + width_size, pad_size, static_cast<T>(0));
- }
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-template<typename T>
-hailo_status transform__d2h_NHCW_to_NCHW(
- const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
- T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
- CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform features src/dst should be the same");
- CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform height src/dst should be the same");
- CHECK(dst_image_shape->width <= src_image_shape->width, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform dst width should be smaller/equal than src width");
- CHECK((src_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
- "NCHW_to_NHCW Transform src width must be aligned to {}", HW_DATA_ALIGNMENT);
-
- size_t width_size = dst_image_shape->width;
- for (uint32_t r = 0; r < src_image_shape->height; r++) {
- for (uint32_t c = 0; c < src_image_shape->features; c++) {
- // Copy width
- T *dst = dst_ptr +
- dst_image_shape->width * dst_image_shape->height * c +
- dst_image_shape->width * r;
- const T *src = src_ptr +
- src_image_shape->features * src_image_shape->width * r +
- src_image_shape->width * c;
-
- std::copy_n(src, width_size, dst);
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-template<typename T>
-hailo_status transform__d2h_argmax_NHCW_to_NHW(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape,
- T *dst_ptr, const hailo_3d_image_shape_t &dst_image_shape)
-{
- assert(nullptr != src_ptr);
- assert(nullptr != dst_ptr);
-
- CHECK(src_image_shape.height == dst_image_shape.height, HAILO_INVALID_OPERATION,
- "NHCW_to_NHW argmax Transform is supported only when src height ({}) is equal to dst height ({})",
- src_image_shape.height, dst_image_shape.height);
- CHECK(src_image_shape.width >= dst_image_shape.width, HAILO_INVALID_OPERATION,
- "NHCW_to_NHW argmax Transform is supported only when src width ({}) is equal/larger than dst width ({})",
- src_image_shape.width, dst_image_shape.width);
- CHECK(dst_image_shape.features == 1, HAILO_INVALID_OPERATION,
- "NHCW_to_NHW argmax Transform is supported only when dst features ({}) is 1",
- dst_image_shape.features);
- CHECK(src_image_shape.features < std::numeric_limits<T>::max(), HAILO_INVALID_OPERATION,
- "NHCW_to_NHW argmax Transform is supported only when src features ({}) is smaller than {}",
- src_image_shape.features, std::numeric_limits<T>::max());
-
- const auto src_row_size = src_image_shape.width * src_image_shape.features;
- const auto dst_row_size = dst_image_shape.width;
- for (uint32_t r = 0; r < src_image_shape.height; r++) {
- // For each row, we iterate on all columns, and find the max feature. It can be implemented better by iteratre
- // over all features, and on each iteration save the max value for each column.
- const T *src_row = src_ptr + (r * src_row_size);
- T *dst_row = dst_ptr + (r * dst_row_size);
- for (uint32_t w = 0; w < dst_image_shape.width; w++) {
- const T *offset_in_row = src_row + w;
- T max_index = 0;
- T max_value = *offset_in_row;
-
- for (uint32_t c = 1; c < src_image_shape.features; c++) {
- offset_in_row += src_image_shape.width;
- const auto ¤t_value = *offset_in_row;
- if (current_value > max_value) {
- max_index = static_cast<T>(c);
- max_value = current_value;
- }
- }
-
- dst_row[w] = max_index;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-
-template<typename T>
-hailo_status transform__h2d_YUY2_to_YUY2(const T *src_ptr, T *dst_ptr, uint32_t shape_size)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
- "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
-
- std::copy_n(src_ptr, shape_size, dst_ptr);
-
- return HAILO_SUCCESS;
-}
-
-template<typename T>
-hailo_status transform__h2d_RGB4_to_NHWC(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape, T *dst_ptr,
- const hailo_3d_image_shape_t &dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- const auto row_size = src_image_shape.width * src_image_shape.features;
- const auto src_row_size = HailoRTCommon::align_to(row_size, RGB4_ALIGNMENT);
- const auto dst_row_size = dst_image_shape.width * dst_image_shape.features;
-
- const auto pad_size = (dst_image_shape.width - src_image_shape.width) * dst_image_shape.features;
-
- uint32_t src_offset = 0;
- uint32_t dst_offset = 0;
-
- for (uint32_t r = 0; r < dst_image_shape.height; r++) {
- src_offset = r * src_row_size;
- dst_offset = r * dst_row_size;
- memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_row_size * sizeof(T));
- if (pad_size != 0) {
- std::fill_n(dst_ptr + dst_offset + src_row_size, pad_size, static_cast<T>(0));
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-template<typename T>
-hailo_status transform__h2d_RGB4_to_NHCW(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape, T *dst_ptr,
- const hailo_3d_image_shape_t &dst_image_shape)
-{
- /* Validate arguments */
- ASSERT(NULL != src_ptr);
- ASSERT(NULL != dst_ptr);
-
- const auto row_size = src_image_shape.width * src_image_shape.features;
- const auto src_row_size = HailoRTCommon::align_to(row_size, RGB4_ALIGNMENT);
- const auto dst_row_size = dst_image_shape.width * dst_image_shape.features;
-
- const auto pad_size = (dst_image_shape.width - src_image_shape.width) * dst_image_shape.features;
-
- uint32_t src_offset = 0;
- uint32_t dst_offset = 0;
-
- for (uint32_t r = 0; r < src_image_shape.height ; r++) {
- /* transpose - switch width and channels */
- for (uint32_t f = 0; f < src_image_shape.features; f++) {
- for (uint32_t c = 0; c < src_image_shape.width; c++) {
- src_offset = r * src_row_size + c * src_image_shape.features + f;
- dst_offset = r * dst_row_size + f * dst_image_shape.width + c;
- dst_ptr[dst_offset] = src_ptr[src_offset];
- }
- /* pad feature to 8 elemnts */
- if (pad_size != 0) {
- dst_offset = r * dst_row_size + f * dst_image_shape.width + src_image_shape.width;
- std::fill_n(dst_ptr + dst_offset, pad_size, static_cast<T>(0));
- }
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status InputTransformContext::quantize_stream(const void *src_ptr, void *quant_buffer)
-{
- auto shape_size = HailoRTCommon::get_shape_size(m_src_image_shape);
-
- switch (m_src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- if (m_dst_format.type == HAILO_FORMAT_TYPE_UINT8) {
- Quantization::quantize_input_buffer<uint8_t, uint8_t>((uint8_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- if (m_dst_format.type == HAILO_FORMAT_TYPE_UINT16) {
- Quantization::quantize_input_buffer<uint16_t, uint16_t>((uint16_t*)src_ptr, (uint16_t *)quant_buffer, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- case HAILO_FORMAT_TYPE_FLOAT32:
- if (m_dst_format.type == HAILO_FORMAT_TYPE_UINT8) {
- Quantization::quantize_input_buffer<float32_t, uint8_t>((float32_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
- }
- else if (m_dst_format.type == HAILO_FORMAT_TYPE_UINT16) {
- Quantization::quantize_input_buffer<float32_t, uint16_t>((float32_t*)src_ptr, (uint16_t*)quant_buffer, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
-}
-
-hailo_status FrameOutputTransformContext::quantize_stream(const void *dst_ptr)
-{
- auto shape_size = HailoRTCommon::get_shape_size(m_dst_image_shape);
-
- switch (m_dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
- Quantization::dequantize_output_buffer_in_place<uint8_t, uint8_t>((uint8_t*)dst_ptr, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
- Quantization::dequantize_output_buffer_in_place<uint16_t, uint16_t>((uint16_t*)dst_ptr, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- case HAILO_FORMAT_TYPE_FLOAT32:
- /* if output layer is argmax - do not rescale */
- if (HAILO_FORMAT_ORDER_NHW != m_dst_format.order) {
- if (m_src_format.type == HAILO_FORMAT_TYPE_UINT8) {
- Quantization::dequantize_output_buffer_in_place<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
- }
- else if (m_src_format.type == HAILO_FORMAT_TYPE_UINT16) {
- Quantization::dequantize_output_buffer_in_place<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- } else {
- if (m_src_format.type == HAILO_FORMAT_TYPE_UINT8) {
- cast_elements_inplace<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size);
- }
- else if (m_src_format.type == HAILO_FORMAT_TYPE_UINT16) {
- cast_elements_inplace<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- }
- break;
- default:
- LOGGER__ERROR("Invalid dst-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status reorder_input_stream(const void *src_ptr, hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
- void *dst_ptr, hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
-{
- if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NHWC_to_NHCW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NHWC_to_NHCW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_NC == src_format.order) &&
- (HAILO_FORMAT_ORDER_NC == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NC_to_NC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NC_to_NC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if (((HAILO_FORMAT_ORDER_FCR == src_format.order) || (HAILO_FORMAT_ORDER_NHWC == src_format.order)) &&
- (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
- assert(0 == (dst_image_shape.features % 8));
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_FCR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_FCR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if (((HAILO_FORMAT_ORDER_F8CR == src_format.order) || (HAILO_FORMAT_ORDER_NHWC == src_format.order)) &&
- (HAILO_FORMAT_ORDER_F8CR == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_F8CR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_F8CR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
- assert(1 == src_image_shape.features);
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == dst_format.order)) {
- assert(1 == src_image_shape.features);
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
- (HAILO_FORMAT_ORDER_RGB888 == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- return transform__h2d_NHWC_to_RGB888<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- return transform__h2d_NHWC_to_RGB888<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- if ((HAILO_FORMAT_ORDER_NCHW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- return transform__h2d_NCHW_to_NHCW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- return transform__h2d_NCHW_to_NHCW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- if ((HAILO_FORMAT_ORDER_YUY2 == src_format.order) &&
- (HAILO_FORMAT_ORDER_YUY2 == dst_format.order)) {
- auto shape_size = HailoRTCommon::get_shape_size(src_image_shape);
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- return transform__h2d_YUY2_to_YUY2<uint8_t>((uint8_t*)src_ptr, (uint8_t*)dst_ptr, shape_size);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- return transform__h2d_YUY2_to_YUY2<uint16_t>((uint16_t*)src_ptr, (uint16_t*)dst_ptr, shape_size);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- if (((HAILO_FORMAT_ORDER_NV12 == src_format.order) &&
- (HAILO_FORMAT_ORDER_HAILO_YYUV) == dst_format.order) ||
- ((HAILO_FORMAT_ORDER_NV21 == src_format.order) &&
- (HAILO_FORMAT_ORDER_HAILO_YYVU) == dst_format.order)) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_NV12_to_NV12<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_NV12_to_NV12<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format {}", src_format.type);
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_RGB4 == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_RGB4_to_NHWC<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_RGB4_to_NHWC<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- if ((HAILO_FORMAT_ORDER_RGB4 == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
- switch (dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__h2d_RGB4_to_NHCW<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__h2d_RGB4_to_NHCW<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- return HAILO_SUCCESS;
- }
-
- LOGGER__ERROR("Unsupported input stream transformation from hailo_format_order_t "
- "{} to hailo_format_order_t {}", src_format.order, dst_format.order);
- return HAILO_INVALID_OPERATION;
-}
-
-hailo_status reorder_output_stream(const void *src_ptr, hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
- void *dst_ptr, hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
-{
- if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHCW_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHCW_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
- else if ((HAILO_FORMAT_ORDER_NC == src_format.order) &&
- (HAILO_FORMAT_ORDER_NC == dst_format.order)) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NC_to_NC<uint8_t>((uint8_t*)src_ptr, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NC_to_NC<uint16_t>((uint16_t*)src_ptr, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
- else if ((HAILO_FORMAT_ORDER_NHW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHW == dst_format.order)) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHW_to_NHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHW_to_NHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
- else if ((HAILO_FORMAT_ORDER_FCR == src_format.order) &&
- ((HAILO_FORMAT_ORDER_FCR == dst_format.order) || (HAILO_FORMAT_ORDER_NHWC == dst_format.order))) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
- else if ((HAILO_FORMAT_ORDER_F8CR == src_format.order) &&
- ((HAILO_FORMAT_ORDER_F8CR == dst_format.order) || (HAILO_FORMAT_ORDER_NHWC == dst_format.order))) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_F8CR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_F8CR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
- else if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
- assert((1 == src_image_shape.features) && (1 == dst_image_shape.features));
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_BAYER_RGB<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_BAYER_RGB<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NCHW) == dst_format.order) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHCW_to_NCHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHCW_to_NCHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_NHW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NCHW) == dst_format.order) {
-
- CHECK((src_image_shape.features == 1) && (dst_image_shape.features == 1), HAILO_INVALID_ARGUMENT,
- "Invalid number of features. Expected 1, received hw: {}, user: {}",
- src_image_shape.features, dst_image_shape.features);
- switch (src_format.type) {
- // We call for transform__d2h_NHW_to_NHW function since NCHW is the same as NHW when the the image's features = 1.
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHW_to_NHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHW_to_NHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHW == dst_format.order) &&
- (0 != (HAILO_FORMAT_FLAGS_HOST_ARGMAX & src_format.flags))) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- return transform__d2h_argmax_NHCW_to_NHW<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
- case HAILO_FORMAT_TYPE_UINT16:
- return transform__d2h_argmax_NHCW_to_NHW<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
- default:
- LOGGER__ERROR("Invalid src-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
- (HAILO_FORMAT_ORDER_NHWC) == dst_format.order) {
- switch (src_format.type) {
- case HAILO_FORMAT_TYPE_UINT8:
- transform__d2h_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
- break;
- case HAILO_FORMAT_TYPE_UINT16:
- transform__d2h_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
- break;
- default:
- LOGGER__ERROR("Invalid src-buffer's type format {}", src_format.type);
- return HAILO_INVALID_ARGUMENT;
- }
- } else {
- LOGGER__ERROR("Unsupported output stream transformation from hailo_format_order_t "
- "{} to hailo_format_order_t {}", HailoRTCommon::get_format_order_str(src_format.order),
- HailoRTCommon::get_format_order_str(dst_format.order));
- return HAILO_INVALID_OPERATION;
- }
-
- return HAILO_SUCCESS;
-}
-
-/* Public funcs */
/* Run the H2D transform pipeline on a single frame: quantize -> transpose -> reorder.
 * Each enabled stage reads from src_ptr and writes either into an intermediate buffer
 * (quant_buffer / transpose_buffer) or straight into dst_ptr when it is the final stage,
 * then rebinds src_ptr for the next stage.
 * @param src_ptr          User frame to transform.
 * @param quant_buffer     Scratch buffer for the quantize stage (from m_quant_buffer).
 * @param dst_ptr          Device-layout output frame.
 * @param transpose_buffer Scratch buffer for the transpose stage; must be non-empty and
 *                         correctly sized when m_should_transpose is set.
 */
hailo_status InputTransformContext::transform_inner(const void *src_ptr, void *quant_buffer, void *dst_ptr,
    MemoryView transpose_buffer)
{
    void *orig_dst_ptr = nullptr;
    hailo_3d_image_shape_t transposed_image_shape = m_src_image_shape;
    hailo_format_t quantized_src_format = m_src_format;

    if (!(m_should_quantize || m_should_transpose || m_should_reorder)) {
        /* If transform was created without any actual use - just copy src_ptr to dst_ptr */
        LOGGER__WARN("Transformer was created, but not needed and can be removed. copies src buffer to dst buffer");
        auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
        memcpy(dst_ptr, src_ptr, frame_size);
        return HAILO_SUCCESS;
    }

    if (m_should_quantize) {
        /* If final step - output of this quant func is the dst_ptr */
        orig_dst_ptr = (m_should_transpose || m_should_reorder) ? quant_buffer : dst_ptr;
        auto status = quantize_stream(src_ptr, orig_dst_ptr);
        CHECK_SUCCESS(status);
        /* Downstream stages now read the quantized data, which carries the device element type */
        src_ptr = orig_dst_ptr;
        quantized_src_format.type = m_dst_format.type;
    }

    if (!(m_should_transpose || m_should_reorder)) {
        /* If quantize is the only step - need to copy src buffer to dst buffer */
        /* NOTE(review): on this path quantize_stream already wrote into dst_ptr and
         * src_ptr was rebound to dst_ptr, so this memcpy copies the buffer onto
         * itself - confirm whether it is intentionally a no-op. */
        auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
        memcpy(dst_ptr, src_ptr, frame_size);
        return HAILO_SUCCESS;
    }

    if (m_should_transpose) {
        if (transpose_buffer.empty()) {
            LOGGER__ERROR("Transpose buffer not given");
            return HAILO_INVALID_ARGUMENT;
        }

        /* The buffer must hold one full frame in the (possibly quantized) source format */
        if (transpose_buffer.size() != HailoRTCommon::get_frame_size(m_src_image_shape, quantized_src_format)) {
            LOGGER__ERROR("Transpose buffer size mismatch (expected {}, actual {})",
                HailoRTCommon::get_frame_size(m_src_image_shape, quantized_src_format), transpose_buffer.size());
            return HAILO_INVALID_ARGUMENT;
        }

        /* If final step - output of this quant func is the dst_ptr */
        orig_dst_ptr = (m_should_reorder) ? transpose_buffer.data() : dst_ptr;
        auto status = transform__transpose_buffer(src_ptr, m_src_image_shape, quantized_src_format, orig_dst_ptr);
        CHECK_SUCCESS(status);

        src_ptr = transpose_buffer.data();
        transposed_image_shape = transposed_shape(m_src_image_shape);
    }

    if (m_should_reorder){
        auto status = reorder_input_stream(src_ptr, transposed_image_shape, quantized_src_format, dst_ptr,
            m_dst_image_shape, m_dst_format);
        CHECK_SUCCESS(status);
    }

    return HAILO_SUCCESS;
}
-
-hailo_status FrameOutputTransformContext::transform_inner(const void *src_ptr, void *dst_ptr, MemoryView transpose_buffer)
-{
- hailo_format_t transposed_format = m_dst_format;
- hailo_3d_image_shape_t transposed_image_shape = m_dst_image_shape;
- transposed_format.type = m_src_format.type;
-
- void *orig_dst_ptr = nullptr;
- void *orig_src_ptr = nullptr;
-
- if (!(m_should_quantize || m_should_transpose || m_should_reorder)) {
- /* If transform context was created without any actual use - just copy src_ptr to dst_ptr */
- LOGGER__WARN("Transform context was created, but not needed and can be removed. copies src buffer to dst buffer");
- auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
- memcpy(dst_ptr, src_ptr, frame_size);
- return HAILO_SUCCESS;
- }
-
- if (m_should_reorder) {
- if (m_should_transpose) {
- /* If user needs to reorder and transform - the output of the reorder is the transform buffer*/
- if (transpose_buffer.empty()) {
- LOGGER__ERROR("Transpose buffer not given");
- return HAILO_INVALID_ARGUMENT;
- }
-
- if (transpose_buffer.size() != HailoRTCommon::get_frame_size(m_dst_image_shape, transposed_format)) {
- LOGGER__ERROR("Transpose buffer size mismatch (expected {}, actual {})",
- HailoRTCommon::get_frame_size(m_dst_image_shape, transposed_format), transpose_buffer.size());
- return HAILO_INVALID_ARGUMENT;
- }
-
- // Prepare transpose - the order transformation will be applied to the transpose buffer, later we will transpose
- // from dst_ptr (transpose_buffer) to orig_dst_ptr (user buffer)
- orig_dst_ptr = transpose_buffer.data();
- transposed_image_shape = transposed_shape(m_dst_image_shape);
- } else {
- orig_dst_ptr = dst_ptr;
- }
- auto status = reorder_output_stream(src_ptr, m_src_image_shape, m_src_format, orig_dst_ptr, transposed_image_shape,
- m_dst_format);
- CHECK_SUCCESS(status);
- }
-
- if (m_should_transpose) {
- orig_src_ptr = (m_should_reorder) ? orig_dst_ptr : const_cast<void *>(src_ptr);
- auto status = transform__transpose_buffer(orig_src_ptr, transposed_image_shape, transposed_format, dst_ptr);
- CHECK_SUCCESS(status);
-
- transposed_image_shape = transposed_shape(transposed_image_shape);
- }
-
- if (m_should_quantize) {
- auto status = quantize_stream(dst_ptr);
- CHECK_SUCCESS(status);
- }
-
- if (!(m_should_transpose || m_should_reorder)) {
- /* If quantize is the only step - need to copy src buffer to dst buffer */
- auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
- memcpy(dst_ptr, src_ptr, frame_size);
- }
-
- return HAILO_SUCCESS;
-}
-
-
/* Demultiplex a raw muxed frame into the per-stream buffers described by mux_info,
 * walking the mux tree recursively (maximum depth HailoRTCommon::MUX_INFO_COUNT).
 * @param src            The raw (muxed) source frame.
 * @param offset         Current read offset inside src, advanced one row at a time.
 * @param mux_info       The mux node whose successors consume the interleaved rows.
 * @param mux_row_count  Number of row groups to consume at this nesting level.
 */
hailo_status transform_demux_raw_frame(const void *src, uint32_t offset,
    hailo_mux_info_t *mux_info, uint32_t mux_row_count)
{
    // This is a recursive function with a maximum depth of HailoRTCommon::MUX_INFO_COUNT.
    hailo_status status = HAILO_UNINITIALIZED;
    // NOTE(review): despite the name, this iterates mux_info->successors - confirm the
    // naming reflects the mux graph direction.
    struct hailo_mux_info_t *predecessor = NULL;
    uint32_t row_size = 0;

    CHECK_ARG_NOT_NULL(src);

    for (uint32_t i = 0; i < mux_row_count; i++) {
        for (uint32_t j = 0; j < mux_info->successors_count; j++) {
            predecessor = mux_info->successors[j];
            row_size = predecessor->row_size;

            // Nested mux node: recurse, consuming its share of rows for this group
            if ((predecessor->info.is_mux) && (i < predecessor->rows_gcd)) {
                status = transform_demux_raw_frame(src, offset, predecessor, predecessor->info.hw_shape.height / mux_info->rows_gcd);
                CHECK_SUCCESS(status);
            }

            // Leaf stream: copy one row into its buffer until its logical height is filled
            if (!(predecessor->info.is_mux)) {
                if (predecessor->row_counter < predecessor->info.shape.height) {
                    memcpy((uint8_t*)predecessor->buffer + predecessor->current_offset, (uint8_t*)src + offset, row_size);
                    predecessor->current_offset += row_size;
                }

                predecessor->row_counter++;
                // NOTE(review): the counter wraps one past hw height (height + 1); the
                // off-by-one wrap point is unexplained here - confirm it is intentional.
                if (predecessor->row_counter == (predecessor->info.hw_shape.height + 1)) {
                    predecessor->row_counter = 0;
                }
            }

            offset += row_size;
        }
    }

    return HAILO_SUCCESS;
}
-
-hailo_status validate_input_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
- hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
-{
- /* Check quantize flags - where quantize is no needed */
- if ((HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
- LOGGER__ERROR("Cannot dequantize input data");
- return HAILO_INVALID_ARGUMENT;
- }
-
- if ((HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && (HAILO_FORMAT_TYPE_FLOAT32 == src_format.type)) {
- LOGGER__ERROR("float32 data isn't quantized");
- return HAILO_INVALID_ARGUMENT;
- }
-
- /* Check for overscale transformation*/
- CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == src_format.type) || (src_format.type >= dst_format.type),
- HAILO_INVALID_ARGUMENT, "Overscale transformation is not supported");
-
- /* Check device type */
- if (!((HAILO_FORMAT_TYPE_UINT16 == dst_format.type) || (HAILO_FORMAT_TYPE_UINT8 == dst_format.type))) {
- LOGGER__ERROR("unsupported device type {}", dst_format.type);
- return HAILO_INVALID_ARGUMENT;
- }
-
- /* Check reorder flags - where no reorder is needed */
- if ((HAILO_FORMAT_ORDER_FCR == src_format.order) &&
- (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
- if (0 != (dst_image_shape.features % 8)) {
- LOGGER__ERROR("HW features must be aligned to {}. passed hw features - {}",
- HW_DATA_ALIGNMENT, dst_image_shape.features);
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
- if (src_image_shape.features != 1) {
- LOGGER__ERROR("Invalid Bayer user features. Expected 1, received {}", src_image_shape.features);
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == dst_format.order)) {
- if (src_image_shape.features != 1) {
- LOGGER__ERROR("Invalid Bayer user features. Expected 1, received {}", src_image_shape.features);
- return HAILO_INVALID_ARGUMENT;
- }
- } else if ((HAILO_FORMAT_ORDER_YUY2 == src_format.order) &&
- (HAILO_FORMAT_ORDER_YUY2 == dst_format.order)) {
- auto shape_size = HailoRTCommon::get_shape_size(src_image_shape);
- CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
- "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status validate_output_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
- hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
-{
- /* Check quantize flags - where quantize is no needed */
- if (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
- LOGGER__ERROR("Cannot quantize output data");
- return HAILO_INVALID_ARGUMENT;
- }
-
- /* Check device type */
- if (!((HAILO_FORMAT_TYPE_UINT16 == src_format.type) || (HAILO_FORMAT_TYPE_UINT8 == src_format.type))) {
- LOGGER__ERROR("unsupported device type {}", dst_format.type);
- return HAILO_INVALID_ARGUMENT;
- }
-
- /* Check for underscale transformation*/
- CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == dst_format.type) || (src_format.type <= dst_format.type),
- HAILO_INVALID_ARGUMENT, "Underscale transformation is not supported");
-
- /* Check reorder flags - where no reorder is needed */
- if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
- (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
- if ((src_image_shape.features != 1) || (dst_image_shape.features != 1)) {
- LOGGER__ERROR("Invalid Bayer user or hw features. Expected 1, received user: {}, hw: {}",
- src_image_shape.features, dst_image_shape.features);
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-bool InputTransformContext::is_transformation_required(
- const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
- const hailo_quant_info_t &quant_info)
-{
- auto host_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);
- return TransformContextUtils::is_transformation_required(HAILO_H2D_STREAM, src_image_shape, host_format,
- dst_image_shape, dst_format, quant_info);
-}
-
-std::string InputTransformContext::description() const
-{
- std::stringstream transform_description;
- bool first = true;
-
- if (m_should_quantize) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
- }
-
- if (m_should_transpose) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_transpose_description(m_src_image_shape, transposed_shape(m_src_image_shape));
- }
-
- if (m_should_reorder) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_reorder_description(m_src_format.order, m_src_image_shape, m_dst_format.order, m_dst_image_shape);
- }
-
- return transform_description.str();
-}
-
-Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
-{
- auto status = validate_input_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- const auto internal_src_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);
-
- const auto src_frame_size = HailoRTCommon::get_frame_size(src_image_shape, internal_src_format);
- const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, dst_format);
-
- Buffer quant_buffer;
- bool should_quantize = TransformContextUtils::should_quantize(HAILO_H2D_STREAM, src_format, dst_format,
- dst_quant_info);
- if (should_quantize) {
- auto expected_quant_buffer = Buffer::create(src_frame_size, 0);
- CHECK_EXPECTED(expected_quant_buffer);
- quant_buffer = expected_quant_buffer.release();
- }
-
- Buffer transpose_buffer;
- bool should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
- if (should_transpose) {
- auto expected_transpose_buffer = Buffer::create(get_transpose_buffer_size(src_image_shape,
- dst_format.type));
- CHECK_EXPECTED(expected_transpose_buffer);
- transpose_buffer = expected_transpose_buffer.release();
- }
-
- auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);
-
- std::unique_ptr<InputTransformContext> transform_context(new (std::nothrow) InputTransformContext(src_frame_size, src_image_shape,
- internal_src_format, dst_frame_size, dst_image_shape, dst_format, dst_quant_info, std::move(quant_buffer),
- std::move(transpose_buffer), should_quantize, should_transpose, should_reorder));
- CHECK_AS_EXPECTED(nullptr != transform_context, HAILO_OUT_OF_HOST_MEMORY);
-
- return transform_context;
-}
-
-Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info,
- const hailo_transform_params_t &transform_params)
-{
- return create(stream_info.shape, transform_params.user_buffer_format, stream_info.hw_shape, stream_info.format,
- stream_info.quant_info);
-}
-
-Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
- hailo_format_type_t format_type)
-{
- return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
-}
-
-InputTransformContext::InputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer &&quant_buffer,
- Buffer &&transpose_buffer,const bool should_quantize, const bool should_transpose, const bool should_reorder) :
- m_src_frame_size(src_frame_size),
- m_src_image_shape(src_image_shape),
- m_src_format(src_format),
- m_dst_frame_size(dst_frame_size),
- m_dst_image_shape(dst_image_shape),
- m_dst_format(dst_format),
- m_dst_quant_info(dst_quant_info),
- m_should_quantize(should_quantize),
- m_should_transpose(should_transpose),
- m_should_reorder(should_reorder),
- m_quant_buffer(std::move(quant_buffer)),
- m_transpose_buffer(std::move(transpose_buffer))
-{}
-
-hailo_status InputTransformContext::transform(const MemoryView src, MemoryView dst)
-{
- /* Check sizes */
- CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
- "src size must be {}. passed size - {}", m_src_frame_size, src.size());
- CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
- "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());
-
- hailo_status status = transform_inner(src.data(),
- quant_buffer().data(), dst.data(), transpose_buffer());
- CHECK_SUCCESS(status);
- return HAILO_SUCCESS;
-}
-
-size_t InputTransformContext::get_src_frame_size() const
-{
- return m_src_frame_size;
-}
-
-size_t InputTransformContext::get_dst_frame_size() const
-{
- return m_dst_frame_size;
-}
-
-bool OutputTransformContext::is_transformation_required(
- const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
- const hailo_quant_info_t &quant_info)
-{
- auto host_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
- return TransformContextUtils::is_transformation_required(HAILO_D2H_STREAM, src_image_shape, src_format,
- dst_image_shape, host_format, quant_info);
-}
-
-Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
-{
- auto status = validate_output_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- if (HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order) {
- return NMSOutputTransformContext::create(src_format, dst_format, dst_quant_info, nms_info);
- }
-
- return FrameOutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info);
-}
-
-Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info,
- const hailo_transform_params_t &transform_params)
-{
- return create(stream_info.hw_shape, stream_info.format, stream_info.shape,
- transform_params.user_buffer_format, stream_info.quant_info, stream_info.nms_info);
-}
-
-Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
- hailo_format_type_t format_type)
-{
- return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
-}
-
-OutputTransformContext::OutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const bool should_quantize,
- const bool should_transpose, const bool should_reorder) :
- m_src_frame_size(src_frame_size),
- m_src_format(src_format),
- m_dst_frame_size(dst_frame_size),
- m_dst_format(dst_format),
- m_dst_quant_info(dst_quant_info),
- m_should_quantize(should_quantize),
- m_should_transpose(should_transpose),
- m_should_reorder(should_reorder)
-{}
-
-FrameOutputTransformContext::FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
- const bool should_quantize, const bool should_transpose, const bool should_reorder) :
- OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize,
- should_transpose, should_reorder), m_src_image_shape(src_image_shape), m_dst_image_shape(dst_image_shape),
- m_transpose_buffer(std::move(transpose_buffer))
-{}
-
-Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
-{
- const auto internal_dst_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
-
- const auto src_frame_size = HailoRTCommon::get_frame_size(src_image_shape, src_format);
- const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, internal_dst_format);
-
- auto should_quantize = TransformContextUtils::should_quantize(HAILO_D2H_STREAM, src_format, dst_format,
- dst_quant_info);
-
- Buffer transpose_buffer;
- auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
- if (should_transpose) {
- auto expected_transpose_buffer = Buffer::create(get_transpose_buffer_size(dst_image_shape, src_format.type));
- CHECK_EXPECTED(expected_transpose_buffer);
- transpose_buffer = expected_transpose_buffer.release();
- }
-
- auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);
-
- std::unique_ptr<OutputTransformContext> frame_transform_context = std::make_unique<FrameOutputTransformContext>(src_frame_size,
- src_image_shape, src_format, dst_frame_size, dst_image_shape, internal_dst_format, dst_quant_info, std::move(transpose_buffer),
- should_quantize, should_transpose, should_reorder);
-
- CHECK_AS_EXPECTED(nullptr != frame_transform_context, HAILO_OUT_OF_HOST_MEMORY);
-
- return frame_transform_context;
-}
-
-NMSOutputTransformContext::NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format,
- size_t dst_frame_size, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
- const hailo_nms_info_t &nms_info, Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose) :
- OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize ,should_transpose,
- true), m_nms_info(nms_info), m_chunk_offsets(nms_info.chunks_per_frame, 0), m_quant_buffer(std::move(quant_buffer))
-{}
-
-Expected<std::unique_ptr<OutputTransformContext>> NMSOutputTransformContext::create(const hailo_format_t &src_format,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
-{
- // Validate params
- CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order, HAILO_INVALID_ARGUMENT,
- "Format order should be HAILO_FORMAT_ORDER_HAILO_NMS");
-
- const auto internal_dst_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
-
- CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == internal_dst_format.order, HAILO_INVALID_ARGUMENT,
- "Format order should be HAILO_FORMAT_ORDER_HAILO_NMS");
-
- if (internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) {
- CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type, HAILO_INVALID_ARGUMENT,
- "Format order HAILO_FORMAT_ORDER_HAILO_NMS without quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16");
- }
- else {
- CHECK_AS_EXPECTED((HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type) || (HAILO_FORMAT_TYPE_FLOAT32 == internal_dst_format.type),
- HAILO_INVALID_ARGUMENT,
- "Format order HAILO_FORMAT_ORDER_HAILO_NMS with quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16 or HAILO_FORMAT_TYPE_FLOAT32");
- }
-
- const auto src_frame_size = HailoRTCommon::get_nms_hw_frame_size(nms_info);
- auto dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
-
- Buffer quant_buffer;
- const bool should_quantize = (src_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) &&
- !(internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED);
- if (should_quantize) {
- dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
- auto expected_nms_quant_buffer = Buffer::create(dst_frame_size, 0);
- CHECK_EXPECTED(expected_nms_quant_buffer);
- quant_buffer = expected_nms_quant_buffer.release();
- }
-
- auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
-
- std::unique_ptr<OutputTransformContext> nms_transform_context = std::make_unique<NMSOutputTransformContext>(src_frame_size,
- src_format, dst_frame_size, internal_dst_format, dst_quant_info, nms_info, std::move(quant_buffer),
- should_quantize, should_transpose);
- CHECK_AS_EXPECTED(nullptr != nms_transform_context, HAILO_OUT_OF_HOST_MEMORY);
-
- return nms_transform_context;
-}
-
-hailo_status FrameOutputTransformContext::transform(const MemoryView src, MemoryView dst)
-{
- /* Check sizes */
- CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
- "src size must be {}. passed size - {}", m_src_frame_size, src.size());
- CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
- "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());
-
- auto status = transform_inner(src.data(), dst.data(), MemoryView(m_transpose_buffer));
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status NMSOutputTransformContext::transform(const MemoryView src, MemoryView dst)
-{
- /* Check sizes */
- CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
- "src size must be {}. passed size - {}", m_src_frame_size, src.size());
- CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
- "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());
-
- assert((HAILO_FORMAT_ORDER_HAILO_NMS == m_src_format.order) && (HAILO_FORMAT_ORDER_HAILO_NMS == m_dst_format.order));
-
- auto shape_size = HailoRTCommon::get_nms_host_shape_size(m_nms_info);
-
- if (!(HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
- LOGGER__ERROR("Cannot quantize output data");
- return HAILO_INVALID_OPERATION;
- }
-
- if ((HAILO_FORMAT_FLAGS_TRANSPOSED & m_src_format.flags) || (HAILO_FORMAT_FLAGS_TRANSPOSED & m_dst_format.flags)) {
- LOGGER__ERROR("NMS doesn't support transposed format currently");
- return HAILO_INVALID_OPERATION;
- }
-
- if (!((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) &&
- !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags))) {
- transform__d2h_NMS((uint8_t*)src.data(), (uint8_t*)dst.data(), m_nms_info, m_chunk_offsets);
- }
- else {
- transform__d2h_NMS((uint8_t*)src.data(), m_quant_buffer.data(), m_nms_info, m_chunk_offsets);
- }
-
- if ((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
- // NMS has to be uint16 or float32
- switch (m_dst_format.type) {
- case HAILO_FORMAT_TYPE_UINT16:
- if (m_src_format.type == HAILO_FORMAT_TYPE_UINT16) {
- Quantization::dequantize_output_buffer_nms<uint16_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
- (uint16_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- case HAILO_FORMAT_TYPE_FLOAT32:
- if (m_src_format.type == HAILO_FORMAT_TYPE_UINT16) {
- Quantization::dequantize_output_buffer_nms<float32_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
- (float32_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
- }
- else {
- return HAILO_INVALID_OPERATION;
- }
- break;
- default:
- LOGGER__ERROR("Invalid dst-buffer's type format");
- return HAILO_INVALID_ARGUMENT;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-std::string FrameOutputTransformContext::description() const
-{
- std::stringstream transform_description;
- bool first = true;
-
- if (m_should_quantize) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
- }
-
- if (m_should_transpose) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_transpose_description(m_src_image_shape, transposed_shape(m_src_image_shape));
- }
-
- if (m_should_reorder) {
- if (!first) {
- transform_description << " | ";
- } else {
- first = false;
- }
- transform_description << TransformContextUtils::make_reorder_description(m_src_format.order, m_src_image_shape, m_dst_format.order, m_dst_image_shape);
- }
-
- return transform_description.str();
-}
-
-std::string NMSOutputTransformContext::description() const
-{
- std::stringstream transform_description;
-
- transform_description << "number_of_classes: " << m_nms_info.number_of_classes <<
- ", max_bboxes_per_class: " << m_nms_info.max_bboxes_per_class;
-
- if (m_should_quantize) {
- transform_description << " | " <<
- TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
- }
-
- return transform_description.str();
-}
-
-size_t OutputTransformContext::get_src_frame_size() const
-{
- return m_src_frame_size;
-}
-
-size_t OutputTransformContext::get_dst_frame_size() const
-{
- return m_dst_frame_size;
-}
-
-Expected<std::unique_ptr<OutputDemuxer>> OutputDemuxer::create(OutputStream &output_stream)
-{
- auto obj = OutputDemuxerBase::create(output_stream.get_frame_size(), output_stream.get_layer_info());
- CHECK_EXPECTED(obj);
-
- auto obj_ptr = make_unique_nothrow<OutputDemuxerBase>(obj.release());
- CHECK_AS_EXPECTED(nullptr != obj_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return Expected<std::unique_ptr<OutputDemuxer>>(std::move(obj_ptr));
-}
-
-Expected<OutputDemuxerBase> OutputDemuxerBase::create(size_t src_frame_size, const LayerInfo &layer_info)
-{
- // Validate params
- CHECK_AS_EXPECTED((HAILO_FORMAT_ORDER_HAILO_NMS != layer_info.format.order), HAILO_INVALID_OPERATION,
- "NMS layer does not support mux.");
-
- auto mux_infos = get_mux_infos_from_layer_info(layer_info);
- CHECK_EXPECTED(mux_infos);
-
- return OutputDemuxerBase(src_frame_size, mux_infos.release());
-}
-
-hailo_status OutputDemuxerBase::get_mux_info_from_layer_info_impl(hailo_mux_info_t &mux_info, const LayerInfo &layer_info,
- uint32_t &offset, uint32_t height_ratio, std::vector<hailo_mux_info_t> &res, size_t &number_of_mux_infos)
-{
- // This is a recursive function with a maximum depth of HailoRTCommon::MUX_INFO_COUNT.
- mux_info.info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
-
- mux_info.row_size = height_ratio * layer_info.hw_shape.width * layer_info.hw_shape.features * layer_info.hw_data_bytes;
- mux_info.row_counter = 0;
-
- if (mux_info.info.is_mux) {
- int i = 0;
- CHECK(layer_info.predecessor.size() <= HailoRTCommon::MUX_INFO_COUNT, HAILO_INTERNAL_FAILURE, "Too many mux edges");
- for (auto &pred : layer_info.predecessor) {
- hailo_mux_info_t successor = {};
- auto status = get_mux_info_from_layer_info_impl(successor,
- pred, offset, layer_info.height_ratios[i], res, number_of_mux_infos);
- CHECK_SUCCESS(status);
- res.push_back(successor);
- mux_info.successors[i] = &(res.back());
- i++;
- number_of_mux_infos++;
- }
- mux_info.successors_count = static_cast<uint32_t>(layer_info.predecessor.size());
- mux_info.rows_gcd = layer_info.height_gcd;
- } else {
- mux_info.offset = offset;
- offset += mux_info.info.hw_frame_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status fuse_buffers(const std::vector<MemoryView> &buffers,
- const std::vector<hailo_nms_info_t> &infos_of_buffers, MemoryView dst)
-{
- CHECK_ARG_NOT_NULL(dst.data());
- CHECK(buffers.size() == infos_of_buffers.size(), HAILO_INVALID_ARGUMENT,
- "Vectors of buffers and NMS infos does not match!");
- CHECK(HailoRTCommon::MAX_DEFUSED_LAYER_COUNT >= buffers.size(), HAILO_INVALID_ARGUMENT,
- "Buffers count is bigger than allowed! ({} > {})", buffers.size(), HailoRTCommon::MAX_DEFUSED_LAYER_COUNT);
-
- // Order the buffers by their class group index, which specifies in what order they should me fused.
- auto frames = std::vector<std::pair<const hailo_nms_info_t*, const MemoryView*>>(buffers.size());
- for (uint32_t i = 0; i < infos_of_buffers.size(); ++i) {
- frames[infos_of_buffers[i].defuse_info.class_group_index].first = &infos_of_buffers[i];
- frames[infos_of_buffers[i].defuse_info.class_group_index].second = &buffers[i];
- }
-
- uint32_t total_num_of_classes = 0;
- size_t total_size_of_buffers = 0;
- for (const auto &frame_pair : frames) {
- auto &info = *frame_pair.first;
- auto &buffer = *frame_pair.second;
- total_num_of_classes += info.number_of_classes * info.chunks_per_frame;
- total_size_of_buffers += buffer.size();
- CHECK(buffer.size() == HailoRTCommon::get_nms_hw_frame_size(info), HAILO_INVALID_ARGUMENT,
- "Source buffer size is not same as NMS HW frame size! ({} != {})", buffer.size(),
- HailoRTCommon::get_nms_hw_frame_size(info));
- }
-
- // Each frame contributes 1 extra bbox_size at the end of it which acts as a delimiter, but we don't copy those to the fused buffer.
- // We keep the size of the dst buffer 1 bbox_size too big to stay in the format of not defused nms frames.
- total_size_of_buffers -= (frames.size() - 1) * frames[0].first->bbox_size;
-
- CHECK(dst.size() == total_size_of_buffers, HAILO_INVALID_ARGUMENT,
- "Size of destination buffer is not same as the expected size of the fused frame! (size: {}, expected: {})",
- dst.size(), total_size_of_buffers);
-
- uint32_t offsets[HailoRTCommon::MAX_DEFUSED_LAYER_COUNT] = {0};
- uint32_t dst_offset = 0;
- for (uint32_t i = 0; i < total_num_of_classes; i++) {
- size_t buff_index = (i % frames.size());
- auto &info = *frames[buff_index].first;
- auto &buffer = *frames[buff_index].second;
-
- const uint8_t *src_ptr = buffer.data();
- // TODO: Maybe change asserts to checks
- assert(offsets[buff_index] + sizeof(nms_bbox_counter_t) <= buffer.size());
- nms_bbox_counter_t bbox_count = *reinterpret_cast<const nms_bbox_counter_t*>(src_ptr + offsets[buff_index]);
- uint32_t copy_size = static_cast<uint32_t>(sizeof(bbox_count) + bbox_count * info.bbox_size);
- assert(offsets[buff_index] + copy_size <= buffer.size());
- assert(dst_offset + copy_size <= dst.size());
- std::copy_n(src_ptr + offsets[buff_index], copy_size, dst.data() + dst_offset);
- offsets[buff_index] += copy_size;
- dst_offset += copy_size;
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::vector<hailo_mux_info_t>> OutputDemuxerBase::get_mux_infos_from_layer_info(const LayerInfo &layer_info)
-{
- // Setting the first mux
- std::vector<hailo_mux_info_t> res;
- res.reserve(HailoRTCommon::MUX_INFO_COUNT);
- res.push_back({});
- uint32_t offset = 0;
- uint32_t height_ratio = 0;
- size_t number_of_mux_infos = 1;
-
- auto status = get_mux_info_from_layer_info_impl(res[0], layer_info, offset, height_ratio, res, number_of_mux_infos);
- CHECK_SUCCESS_AS_EXPECTED(status);
- res.resize(number_of_mux_infos);
-
- return res;
-}
-
-OutputDemuxerBase::OutputDemuxerBase(size_t src_frame_size, std::vector<hailo_mux_info_t> &&mux_infos) :
- OutputDemuxer(src_frame_size),
- m_mux_infos(std::move(mux_infos)) {}
-
-hailo_status OutputDemuxerBase::transform_demux(const MemoryView src, std::vector<MemoryView> &raw_buffers)
-{
- size_t raw_buffer_index = 0;
- size_t total_mux_sizes = 0;
-
- CHECK(raw_buffers.size() == get_edges_stream_info().size(), HAILO_INVALID_ARGUMENT,
- "There is a missmatch between mux edges counts ({}) and raw_buffers_size ({})", get_edges_stream_info().size(),
- raw_buffers.size());
-
- // Reset the runtime offset
- for (auto &mux_edge : m_mux_infos) {
- if (!mux_edge.info.is_mux) {
- mux_edge.buffer = (void*)((uintptr_t)raw_buffers[raw_buffer_index].data());
- mux_edge.current_offset = 0;
- mux_edge.row_counter = 0;
- CHECK((mux_edge.info.hw_frame_size == raw_buffers[raw_buffer_index].size()), HAILO_INVALID_ARGUMENT,
- "Expected buffer size of {}, got {}", mux_edge.info.hw_frame_size, raw_buffers[raw_buffer_index].size());
- total_mux_sizes += mux_edge.info.hw_frame_size;
- raw_buffer_index++;
- }
- }
- CHECK(total_mux_sizes == src.size(), HAILO_INVALID_ARGUMENT,
- "src_size must be: {}, passed_size: {}", total_mux_sizes, src.size());
-
- // TODO: Optimization - Read directly to user raw buffers (in case of NO_TRANSFORM, INPLACE_TRANSFORM)
-
- auto first_mux_info = m_mux_infos[0];
- return transform_demux_raw_frame(src.data(), 0, &first_mux_info, first_mux_info.rows_gcd);
-}
-
-hailo_status OutputDemuxerBase::transform_demux(const MemoryView src, const std::map<std::string, MemoryView> &dst_ptrs)
-{
- size_t total_mux_sizes = 0;
- // Reset the runtime offset
- for (auto &mux_edge : m_mux_infos) {
- if (!mux_edge.info.is_mux) {
- auto name = std::string(mux_edge.info.name);
- CHECK(contains(dst_ptrs, name), HAILO_INVALID_ARGUMENT, "edge name {} is not in dst_ptrs", name);
- mux_edge.buffer = const_cast<void*>(reinterpret_cast<const void*>((dst_ptrs.at(name)).data()));
- mux_edge.current_offset = 0;
- mux_edge.row_counter = 0;
- CHECK((mux_edge.info.hw_frame_size == (dst_ptrs.at(name)).size()), HAILO_INVALID_ARGUMENT,
- "Expected buffer size of {}, got {}", mux_edge.info.hw_frame_size, (dst_ptrs.at(name)).size());
- total_mux_sizes += mux_edge.info.hw_frame_size;
- }
- }
- CHECK(total_mux_sizes == src.size(), HAILO_INVALID_ARGUMENT, "src_size must be: {}, passed_size: {}",
- total_mux_sizes, src.size());
-
- auto first_mux_info = m_mux_infos[0];
- return transform_demux_raw_frame(src.data(), 0, &first_mux_info, first_mux_info.rows_gcd);
-}
-
-} /* namespace hailort */
--- /dev/null
+# Build registration for the transform module: appends transform.cpp to the
+# parent scope's HAILORT_CPP_SOURCES list so it is compiled into the library.
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/transform.cpp
+)
+
+# PARENT_SCOPE makes the extended source list visible to the including directory.
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file transform.cpp
+ * @brief Implements transform module
+ **/
+#include "hailo/transform.hpp"
+#include "hailo/hailort.h"
+#include "hailo/stream.hpp"
+#include "hailo/expected.hpp"
+#include "hailo/hailort_common.hpp"
+#include "hailo/quantization.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/compiler_extensions_compat.hpp"
+#include "common/logger_macros.hpp"
+#include "common/utils.hpp"
+
+#include "transform/transform_internal.hpp"
+
+#include <type_traits>
+#include <sstream>
+
+
+namespace hailort
+{
+
+#define HW_DATA_ALIGNMENT (8)
+#define RGB_FEATURES (3)
+
+
+// Decides whether a quantization/de-quantization step is required between the
+// source and destination formats.
+// H2D (host -> device): quantize when the host buffer is not quantized, the
+// device buffer is, and the transform is not an identity (identity qp with
+// identical element types needs no work).
+// D2H (device -> host): de-quantize when the device buffer is quantized and
+// the host buffer is not.
+bool TransformContextUtils::should_quantize(const hailo_stream_direction_t stream_direction,
+    const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
+{
+    if (HAILO_H2D_STREAM == stream_direction) {
+        return (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) &&
+            (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags) &&
+            !((Quantization::is_identity_qp(quant_info)) && (src_format.type == dst_format.type)));
+    } else {
+        return (HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) &&
+            !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags);
+    }
+}
+
+// A transpose is needed exactly when the TRANSPOSED flag differs between the
+// two formats (one side is transposed and the other is not).
+bool TransformContextUtils::should_transpose(const hailo_format_flags_t &src_flags, const hailo_format_flags_t &dst_flags)
+{
+    return ((HAILO_FORMAT_FLAGS_TRANSPOSED & src_flags) != (HAILO_FORMAT_FLAGS_TRANSPOSED & dst_flags));
+}
+
+// Returns true when the element layout must be rewritten between src and dst:
+// either the shape/order/type differ, or the order is one that always requires
+// reordering even when shapes and types match.
+bool TransformContextUtils::should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format)
+{
+
+    /* If shapes and format are different - need to use transform_context */
+    if (!((src_image_shape.features == dst_image_shape.features) &&
+        (src_image_shape.height == dst_image_shape.height) &&
+        (src_image_shape.width == dst_image_shape.width) &&
+        (src_format.order == dst_format.order) &&
+        (src_format.type == dst_format.type))) {
+            return true;
+    }
+
+    /* Some orders have to be reordered, even if shapes and types are the same.
+       Note: In order to add a new order to the list - add a test to test_transform with all shapes and types the same
+       pre and post transform */
+    switch (src_format.order) {
+        case HAILO_FORMAT_ORDER_NHWC:
+        case HAILO_FORMAT_ORDER_NHCW:
+        case HAILO_FORMAT_ORDER_NC:
+        case HAILO_FORMAT_ORDER_NHW:
+        case HAILO_FORMAT_ORDER_FCR:
+        case HAILO_FORMAT_ORDER_BAYER_RGB:
+        case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
+        case HAILO_FORMAT_ORDER_YUY2:
+            return false;
+        case HAILO_FORMAT_ORDER_F8CR:
+        case HAILO_FORMAT_ORDER_HAILO_NMS:
+        case HAILO_FORMAT_ORDER_RGB888:
+        case HAILO_FORMAT_ORDER_NCHW:
+        case HAILO_FORMAT_ORDER_NV12:
+        case HAILO_FORMAT_ORDER_NV21:
+            return true;
+        default:
+            LOGGER__WARN("Hailo Internal warning - Unrecognised order. Transformation optimization would not be activated");
+            /* In case a user asks to add a new order - please add this order to either the true or the false list.
+               Unknown orders conservatively report true (reorder needed). */
+            assert(false);
+            return true;
+    }
+}
+
+// A transformation context is required if any of quantization, transpose or
+// reorder is needed for the given direction / shapes / formats.
+bool TransformContextUtils::is_transformation_required(const hailo_stream_direction_t stream_direction,
+    const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
+{
+    /* This function should be called after the auto-expand function, so AUTO
+       placeholders must already have been resolved to concrete values. */
+    assert((HAILO_FORMAT_ORDER_AUTO != src_format.order) && (HAILO_FORMAT_ORDER_AUTO != dst_format.order));
+    assert((HAILO_FORMAT_TYPE_AUTO != src_format.type) && (HAILO_FORMAT_TYPE_AUTO != dst_format.type));
+
+    return (should_quantize(stream_direction, src_format, dst_format, quant_info) ||
+        should_transpose(src_format.flags, dst_format.flags) ||
+        should_reorder(src_image_shape, src_format, dst_image_shape, dst_format));
+}
+
+// Builds a human-readable one-line description of a quantization step
+// (used by the transform contexts' description() methods).
+std::string TransformContextUtils::make_quantization_description(hailo_format_type_t src_type,
+    hailo_format_type_t dst_type, hailo_quant_info_t quant_info)
+{
+    std::stringstream quant_description;
+    quant_description << "Quantization - src_type: " << HailoRTCommon::get_format_type_str(src_type) <<
+        ", dst_type " << HailoRTCommon::get_format_type_str(dst_type) <<
+        ", qp_scale: " << quant_info.qp_scale <<
+        ", qp_zp: " << quant_info.qp_zp <<
+        ", limvals_min: " << quant_info.limvals_min <<
+        ", limvals_max: " << quant_info.limvals_max;
+
+    return quant_description.str();
+}
+
+// Builds a human-readable one-line description of a reorder step, including
+// both orders and both (height, width, features) shapes.
+std::string TransformContextUtils::make_reorder_description(hailo_format_order_t src_order, hailo_3d_image_shape_t src_shape,
+    hailo_format_order_t dst_order, hailo_3d_image_shape_t dst_shape)
+{
+    std::stringstream reorder_description;
+    reorder_description << "Reorder - src_order: " << HailoRTCommon::get_format_order_str(src_order) << ", src_shape: (" <<
+        src_shape.height << ", " << src_shape.width << ", " << src_shape.features << ")" <<
+        ", dst_order: " << HailoRTCommon::get_format_order_str(dst_order) << ", dst_shape: (" <<
+        dst_shape.height << ", " << dst_shape.width << ", " << dst_shape.features << ")";
+
+    return reorder_description.str();
+}
+
+// Builds a human-readable one-line description of a transpose step
+// (source shape and its height/width-swapped counterpart).
+std::string TransformContextUtils::make_transpose_description(hailo_3d_image_shape_t src_shape, hailo_3d_image_shape_t transposed_shape)
+{
+    std::stringstream transpose_description;
+    transpose_description << "Transpose - src_shape: (" <<
+        src_shape.height << ", " << src_shape.width << ", " << src_shape.features << ")" <<
+        ", dst_shape: (" << transposed_shape.height << ", " << transposed_shape.width << ", " << transposed_shape.features << ")";
+
+    return transpose_description.str();
+}
+
+// Widens frame_size elements in place: the buffer initially holds values of
+// type Q packed at the front; each is re-read as Q and stored as T. Iterates
+// backwards so a wider destination element never overwrites a source element
+// that has not been read yet. static_assert forbids narrowing casts.
+template<typename T, typename Q>
+void cast_elements_inplace(T *dst_ptr, uint32_t frame_size)
+{
+    static_assert(sizeof(T) >= sizeof(Q), "cast_elements_inplace() cannot cast to smaller size");
+    for (int32_t i = (int32_t)frame_size - 1; i >= 0; i--) {
+        dst_ptr[i] = (T)(*((Q*)dst_ptr + i));
+    }
+}
+
+/* Transpose funcs */
+// Returns a copy of the shape with height and width swapped (features kept).
+static hailo_3d_image_shape_t transposed_shape(const hailo_3d_image_shape_t &shape)
+{
+    hailo_3d_image_shape_t transposed_shape = shape;
+    std::swap(transposed_shape.height, transposed_shape.width);
+    return transposed_shape;
+}
+
+// Transposes the H and W axes of an NHWC-like buffer. Each (features *
+// feature_bytes_size) chunk is treated as one opaque element of an HxW
+// matrix and moved with memcpy; src and dst must not alias.
+static hailo_status transform__transpose_NHWC(const void *src_ptr, const hailo_3d_image_shape_t &shape,
+    size_t feature_bytes_size, void *dst_ptr)
+{
+    // Flatten the features, look at the data as HW matrix
+    const size_t element_size = shape.features * feature_bytes_size;
+    const uint8_t *src_matrix = reinterpret_cast<const uint8_t*>(src_ptr);
+    uint8_t *dst_matrix = reinterpret_cast<uint8_t*>(dst_ptr);
+    for (size_t r = 0; r < shape.height; r++) {
+        for (size_t c = 0; c < shape.width; c++) {
+            // dest[c][r] = src[r][c]
+            size_t src_offset = element_size * ((r * shape.width) + c);
+            const uint8_t *src_pos = src_matrix + src_offset;
+
+            size_t dst_offset = element_size * ((c * shape.height) + r);
+            uint8_t *dst_pos = dst_matrix + dst_offset;
+
+            memcpy(dst_pos, src_pos, element_size);
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Dispatches a transpose by format order. Only orders whose memory layout is
+// row-major over (H, W) with contiguous features support transposition here;
+// any other order is rejected with HAILO_INVALID_OPERATION.
+hailo_status transform__transpose_buffer(const void *src_ptr, const hailo_3d_image_shape_t &shape,
+    const hailo_format_t &format, void *dst_ptr)
+{
+    switch (format.order)
+    {
+    case HAILO_FORMAT_ORDER_NHWC:
+    case HAILO_FORMAT_ORDER_NHW:
+    case HAILO_FORMAT_ORDER_BAYER_RGB:
+    case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
+    case HAILO_FORMAT_ORDER_FCR:
+    case HAILO_FORMAT_ORDER_F8CR:
+        return transform__transpose_NHWC(src_ptr, shape, HailoRTCommon::get_format_data_bytes(format), dst_ptr);
+    default:
+        LOGGER__ERROR("Transpose is not supported for order {}", format.order);
+        return HAILO_INVALID_OPERATION;
+    }
+}
+
+// Public entry point: validates that src and dst sizes both match the frame
+// size implied by (shape, format), then performs the transpose.
+hailo_status transpose_buffer(const MemoryView src, const hailo_3d_image_shape_t &shape,
+    const hailo_format_t &format, MemoryView dst)
+{
+    if ((src.size() != dst.size()) || (src.size() != HailoRTCommon::get_frame_size(shape, format))) {
+        LOGGER__ERROR("transpose NHWC invalid buffers size");
+        return HAILO_INVALID_ARGUMENT;
+    }
+
+    return transform__transpose_buffer(src.data(), shape, format, dst.data());
+}
+
+
+/* Re-Ordering funcs */
+// Host-to-device NHWC copy with row padding: copies each source row into the
+// destination and zero-fills the remainder of the (wider) destination row.
+// Assumes dst width >= src width and equal heights — the loop bounds rely on it.
+template<typename T>
+void transform__h2d_NHWC_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+    /* Validate arguments */
+    ASSERT(NULL != src_ptr);
+    ASSERT(NULL != dst_ptr);
+
+    size_t src_offset = 0;
+    size_t dst_offset = 0;
+    uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
+    uint32_t pad_size = (dst_image_shape->width - src_image_shape->width) * dst_image_shape->features;
+
+    /* copy src to dst row by row, zero-padding each row out to the dst row width */
+    for (uint32_t r = 0; r < src_image_shape->height ; r++) {
+        src_offset = r * src_image_shape->width * src_image_shape->features;
+        dst_offset = r * dst_image_shape->width * dst_image_shape->features;
+        memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_row_size * sizeof(T));
+        memset(dst_ptr + dst_offset + src_row_size, 0, pad_size * sizeof(T));
+    }
+}
+
+// Device-to-host NHWC copy that strips padding: for every destination pixel,
+// copies only dst features from the (possibly wider/deeper) source layout.
+// Assumes src dimensions >= dst dimensions — iteration is bounded by dst.
+template<typename T>
+void transform__d2h_NHWC_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+    /* Validate arguments */
+    ASSERT(NULL != src_ptr);
+    ASSERT(NULL != dst_ptr);
+
+    size_t src_offset = 0;
+    size_t dst_offset = 0;
+
+    // copy and remove padded features, one (row, column) pixel at a time
+    for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
+        for (uint32_t c = 0; c < dst_image_shape->width ; c++) {
+            src_offset = r * src_image_shape->width * src_image_shape->features + c * src_image_shape->features;
+            dst_offset = r * dst_image_shape->width * dst_image_shape->features + c * dst_image_shape->features;
+            memcpy(dst_ptr + dst_offset, src_ptr + src_offset, dst_image_shape->features * sizeof(T));
+        }
+    }
+}
+
/* Host-to-device reorder for NV12: interleaves the packed Y plane and UV plane of the
 * user buffer into the hw layout (two Y rows followed by one UV row), zero-padding each
 * row up to the hw width. Assumes dst width >= src width -- TODO confirm with callers. */
template<typename T>
void transform__h2d_NV12_to_NV12(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape, T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);
    // Total row count of the packed buffer: Y plane (2/3) + half-height UV plane (1/3)
    uint32_t rows_count = src_image_shape->height * src_image_shape->features;
    ASSERT(0 == fmod(rows_count, 1.5));
    ASSERT(0 == (src_image_shape->width % 2));

    // Zero padding appended after every copied row to reach the hw width
    auto row_leftover = dst_image_shape->width - src_image_shape->width;

    size_t src_offset_y = 0;
    // The UV plane starts right after the Y plane (rows_count / 1.5 == Y-plane row count)
    size_t src_offset_uv = ((static_cast<uint32_t>(rows_count / 1.5)) * src_image_shape->width);
    size_t dst_offset = 0;

    for(uint32_t h = 0; h < (static_cast<uint32_t>(rows_count / 1.5)); h += 2) {
        /* Copy 2 rows of Y for each row of U,V */
        // Copy Y
        for (auto i = 0; i < 2; i++) {
            memcpy(dst_ptr + dst_offset, src_ptr + src_offset_y, (src_image_shape->width * sizeof(T)));
            src_offset_y += (src_image_shape->width);
            dst_offset += (src_image_shape->width);
            memset((dst_ptr + dst_offset), 0, (row_leftover * sizeof(T)));
            dst_offset += row_leftover;
        }

        // Copy U, V
        memcpy(dst_ptr + dst_offset, (src_ptr + src_offset_uv), (src_image_shape->width * sizeof(T)));
        src_offset_uv += src_image_shape->width;
        dst_offset += src_image_shape->width;
        memset((dst_ptr + dst_offset), 0, (row_leftover * sizeof(T)));
        dst_offset += row_leftover;
    }
}
+
/* Host-to-device reorder for I420 (planar Y, U, V): for every pair of Y rows, emits the
 * two Y rows followed by half a row of U and half a row of V, zero-padding each segment
 * up to the hw width. */
template <typename T>
void transform__h2d_I420_to_YYYYUV(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape, T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);
    // Total row count of the packed buffer: Y (2/3) + U (1/6) + V (1/6) planes
    uint32_t rows_count = src_image_shape->height * src_image_shape->features;
    ASSERT(0 == (rows_count % 3));
    ASSERT(0 == (src_image_shape->width % 2));
    ASSERT(dst_image_shape->width >= src_image_shape->width);

    // Zero padding per Y row and per half U/V row to reach the hw width
    auto padding_size_y = (dst_image_shape->width - src_image_shape->width);
    auto padding_size_uv = (dst_image_shape->width / 2) - (src_image_shape->width / 2);

    // Y plane is 2/3 of the total rows (rows_count / 1.5)
    uint32_t y_plane_rows_count = static_cast<uint32_t>(rows_count / 1.5);

    size_t src_offset_y = 0;
    // U plane follows the Y plane; V plane follows the (quarter-size) U plane
    size_t src_offset_u = (y_plane_rows_count * src_image_shape->width);
    size_t src_offset_v = src_offset_u + (static_cast<uint32_t>((y_plane_rows_count / 2) * (src_image_shape->width / 2)));
    size_t dst_offset = 0;

    for(uint32_t h = 0; h < y_plane_rows_count; h += 2) {
        // Copy Y
        for (auto j = 0; j < 2; j++) {
            memcpy(dst_ptr + dst_offset, src_ptr + src_offset_y, (src_image_shape->width * sizeof(T)));
            src_offset_y += (src_image_shape->width);
            dst_offset += (src_image_shape->width);
            // add padding
            memset((dst_ptr + dst_offset), 0, (padding_size_y * sizeof(T)));
            dst_offset += padding_size_y;
        }

        // Copy U/2
        memcpy(dst_ptr + dst_offset, (src_ptr + src_offset_u), ((src_image_shape->width / 2) * sizeof(T)));
        src_offset_u += (src_image_shape->width / 2);
        dst_offset += (src_image_shape->width / 2);
        // Add padding
        memset((dst_ptr + dst_offset), 0, (padding_size_uv * sizeof(T)));
        dst_offset += padding_size_uv;

        // Copy V/2
        memcpy(dst_ptr + dst_offset, (src_ptr + src_offset_v), ((src_image_shape->width / 2) * sizeof(T)));
        src_offset_v += (src_image_shape->width / 2);
        dst_offset += (src_image_shape->width / 2);
        // Add padding
        memset((dst_ptr + dst_offset), 0, (padding_size_uv * sizeof(T)));
        dst_offset += padding_size_uv;
    }
}
+
/* Host-to-device reorder: NHWC (user) -> NHCW (hw). For each row, transposes the
 * interleaved features into per-feature (channel) rows, zero-padding each channel
 * row up to the hw width. */
template<typename T>
void transform__h2d_NHWC_to_NHCW(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);

    uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
    uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;

    size_t src_offset = 0;
    size_t dst_offset = 0;
    // Width padding added at the end of each per-feature row
    uint32_t pad_size = dst_image_shape->width - src_image_shape->width;

    /* transpose - switch width and channels */
    for (uint32_t r = 0; r < src_image_shape->height ; r++) {
        for (uint32_t f = 0; f < src_image_shape->features; f++) {
            for (uint32_t c = 0; c < src_image_shape->width; c++) {
                src_offset = r * src_row_size + c * src_image_shape->features + f;
                dst_offset = r * dst_row_size + f * dst_image_shape->width + c;
                dst_ptr[dst_offset] = src_ptr[src_offset];
            }
            /* pad width to 8 elements */
            if (pad_size != 0) {
                dst_offset = r * dst_row_size + f * dst_image_shape->width + src_image_shape->width;
                memset(dst_ptr + dst_offset, 0, pad_size * sizeof(T));
            }
        }
    }
}
+
+template<typename T>
+void transform__d2h_NHCW_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+ T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ /* transpose - switch channels and width, ignore padded elements */
+ const auto row_size_src = src_image_shape->width * src_image_shape->features;
+ const auto row_size_dest = dst_image_shape->width * dst_image_shape->features;
+ for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
+ const auto row_offset_src = r * row_size_src;
+ const auto row_offset_dest = r * row_size_dest;
+ for (uint32_t c = 0; c < dst_image_shape->width; c++) {
+ const auto src_offset = row_offset_src + c;
+ const auto dest_offset = row_offset_dest + c * dst_image_shape->features;
+ for (uint32_t f = 0; f < dst_image_shape->features; f++) {
+ dst_ptr[dest_offset + f] = src_ptr[src_offset + f * src_image_shape->width];
+ }
+ }
+ }
+}
+
+template<typename T>
+void transform__d2h_NHW_to_NHW(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape, T *dst_ptr,
+ hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ for (uint32_t row = 0; row < dst_image_shape->height; row++) {
+ const T *src = src_ptr + (row * src_image_shape->width);
+ T* dst = dst_ptr + row * dst_image_shape->width;
+ std::copy_n(src, dst_image_shape->width, dst);
+ }
+}
+
+template<typename T>
+void transform__h2d_NC_to_NC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+ T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ /* copy src to dst, and pad channels to 8 elements */
+ memcpy(dst_ptr, src_ptr, src_image_shape->features * sizeof(T));
+ memset(dst_ptr + src_image_shape->features, 0, (dst_image_shape->features - src_image_shape->features) * sizeof(T));
+}
+
+template<typename T>
+void transform__d2h_NC_to_NC(const T *src_ptr, T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ memcpy(dst_ptr, src_ptr, dst_image_shape->features * sizeof(T));
+}
+
+static inline void transform__parse_and_copy_bbox (hailo_bbox_t *dst, uint64_t* proposal)
+{
+ dst->y_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000000) >> 36);
+ dst->x_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000) >> 24);
+ dst->y_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff000) >> 12);
+ dst->x_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff));
+ dst->score = (uint16_t)((*((uint64_t*)proposal) & 0xffff000000000000) >> 48);
+}
+
/* Device-to-host reorder for NMS output: merges the per-class bbox lists of all nms
 * chunks into one per-class list in dst, parsing every hw proposal into a hailo_bbox_t.
 * chunk_offsets is caller-provided scratch, one entry per chunk. */
void transform__d2h_NMS(const uint8_t *src_ptr, uint8_t *dst_ptr, const hailo_nms_info_t &nms_info, std::vector<size_t> &chunk_offsets)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);

    uint32_t num_of_classes = nms_info.number_of_classes;
    uint32_t bbox_size = nms_info.bbox_size;

    size_t bbox_index = 0;
    size_t src_offset = 0;
    size_t dst_offset = 0;

    nms_bbox_counter_t class_bboxes_count = 0;

    // For each class, we need to merge bboxes from all nms chunks. Therefore we use chunk_offsets - for
    // each nms chunk we store its offset, any time we finish parsing some class bboxes, we update the
    // offset

    // First, init the chunk_offset vector
    assert(chunk_offsets.size() == nms_info.chunks_per_frame);
    size_t current_offset = 0;
    chunk_offsets[0] = current_offset;
    for (size_t chunk_index = 1; chunk_index < nms_info.chunks_per_frame; chunk_index++) {
        // Skip all classes. Can be optimized if we store the size of each chunk in the beginning of the buffer
        // (each class in a chunk is a bbox counter followed by that many bboxes)
        for (size_t class_index = 0; class_index < num_of_classes; class_index++) {
            class_bboxes_count = *(reinterpret_cast<const nms_bbox_counter_t*>(src_ptr + current_offset));
            current_offset += sizeof(nms_bbox_counter_t) + (class_bboxes_count * bbox_size);
        }
        chunk_offsets[chunk_index] = current_offset;
    }

    // Now, the merge itself
    for (size_t class_index = 0; class_index < num_of_classes; class_index++) {
        // Accumulated bbox count for this class across all chunks, written before the bboxes
        nms_bbox_counter_t *dst_bbox_counter = reinterpret_cast<nms_bbox_counter_t*>(dst_ptr + dst_offset);
        *dst_bbox_counter = 0;

        dst_offset += sizeof(nms_bbox_counter_t);

        for (size_t chunk_index = 0; chunk_index < nms_info.chunks_per_frame; chunk_index++) {
            // Add bbox from all chunks of current class
            src_offset = chunk_offsets[chunk_index];
            class_bboxes_count = *((nms_bbox_counter_t*)((uint8_t*)src_ptr + src_offset));
            *dst_bbox_counter = static_cast<nms_bbox_counter_t>(*dst_bbox_counter + class_bboxes_count);

            src_offset += sizeof(nms_bbox_counter_t);

            for (bbox_index = 0; bbox_index < class_bboxes_count; bbox_index++) {
                transform__parse_and_copy_bbox((hailo_bbox_t *)(dst_ptr + dst_offset), (uint64_t*)(src_ptr + src_offset));
                src_offset += bbox_size;
                dst_offset += sizeof(hailo_bbox_t);
            }

            // Remember where this chunk's next class starts for the next outer iteration
            chunk_offsets[chunk_index] = src_offset;
        }
    }
}
+
+template<typename T>
+void transform__h2d_FCR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+ T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ size_t src_offset = 0;
+ size_t dst_offset = 0;
+ uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
+ uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
+ uint32_t pad_size = dst_image_shape->features - src_image_shape->features;
+
+ for (uint32_t r = 0; r < src_image_shape->height ; r++) {
+ for (uint32_t c = 0; c < src_image_shape->width; c++) {
+ src_offset = r * src_row_size + c * src_image_shape->features;
+ dst_offset = r * dst_row_size + c * dst_image_shape->features;
+
+ memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_image_shape->features * sizeof(T));
+ dst_offset += src_image_shape->features;
+ memset(dst_ptr + dst_offset, 0, pad_size * sizeof(T));
+ }
+ }
+}
+
/* Host-to-device reorder: NHWC/F8CR (user) -> F8CR (hw). Features are split into groups
 * of HW_DATA_ALIGNMENT (8); within each row, every group is laid out as width columns of
 * 8 contiguous features. The last (partial) group is zero-padded up to 8 features. */
template<typename T>
void transform__h2d_F8CR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);

    uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
    uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
    uint32_t src_features = src_image_shape->features;
    size_t src_offset = 0;
    size_t dst_offset = 0;

    /* copy src data to dst, 8channels * width at a time, pad features to 8 elements */
    for (uint32_t r = 0; r < src_image_shape->height ; r++) {
        for (uint32_t c = 0; c < src_image_shape->width; c++) {
            for (uint32_t f = 0; f < src_image_shape->features; f+=8) {
                // Source: interleaved features; destination: group f (a multiple of 8)
                // starts at f * dst_width, with 8 features per column
                src_offset = r * src_row_size + c * src_image_shape->features + f;
                dst_offset = r * dst_row_size + c * HW_DATA_ALIGNMENT + f * dst_image_shape->width;
                if (f + HW_DATA_ALIGNMENT <= src_image_shape->features) {
                    /* take 8 full features for each column and write them */
                    memcpy(dst_ptr + dst_offset, src_ptr + src_offset, HW_DATA_ALIGNMENT * sizeof(T));
                }
                else {
                    /* take the last 8 or less features, pad features to 8 and write */
                    auto last_features = (src_features % HW_DATA_ALIGNMENT);
                    auto remainder = (HW_DATA_ALIGNMENT - last_features);
                    memcpy(dst_ptr + dst_offset, src_ptr + src_offset, last_features * sizeof(T));
                    dst_offset += last_features;
                    memset(dst_ptr + dst_offset, 0, remainder * sizeof(T));
                }
            }
        }
    }
}
+
/* Device-to-host reorder: F8CR (hw) -> interleaved (user). Inverse of
 * transform__h2d_F8CR - gathers each column's groups of 8 features back into
 * interleaved order, dropping the zero padding of the last partial group. */
template<typename T>
void transform__d2h_F8CR(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);

    uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
    uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
    uint32_t dst_features = dst_image_shape->features;
    uint32_t src_offset = 0;
    uint32_t dst_offset = 0;

    for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
        for (uint32_t c = 0; c < dst_image_shape->width; c++) {
            for (uint32_t f = 0; f < dst_image_shape->features; f+=8) {
                // Source: group f (a multiple of 8) starts at f * src_width, 8 features
                // per column; destination: interleaved features
                src_offset = r * src_row_size + c * HW_DATA_ALIGNMENT + f * src_image_shape->width;
                dst_offset = r * dst_row_size + c * dst_image_shape->features + f;
                if (f + HW_DATA_ALIGNMENT <= dst_image_shape->features) {
                    /* copy the first dst_image_features (which are aligned to 8)! */
                    memcpy(dst_ptr + dst_offset, src_ptr + src_offset, HW_DATA_ALIGNMENT * sizeof(T));
                }
                else {
                    /* copy the last 8 or less features, remove pad */
                    memcpy(dst_ptr + dst_offset, src_ptr + src_offset, (dst_features % HW_DATA_ALIGNMENT) * sizeof(T));
                }
            }
        }
    }
}
+
+template<typename T>
+void transform__d2h_BAYER_RGB(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+ T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ uint32_t src_offset = 0;
+ uint32_t dst_offset = 0;
+
+ for (uint32_t r = 0; r < dst_image_shape->height ; r++) {
+ src_offset = r * src_image_shape->width;
+ dst_offset = r * dst_image_shape->width;
+ memcpy(dst_ptr + dst_offset, src_ptr + src_offset, dst_image_shape->width * sizeof(T));
+ }
+}
+
/* Host-to-device reorder: NHWC with RGB_FEATURES (3) features -> RGB888 (4 features).
 * For every pixel, copies the features in reversed order, appends a zero byte, and
 * zero-pads each row up to the hw width. */
template<typename T>
hailo_status transform__h2d_NHWC_to_RGB888(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    size_t src_offset = 0;
    size_t dst_offset = 0;
    // Row-tail zero padding (in elements) to reach the hw width
    uint32_t pad_size = (dst_image_shape->width - src_image_shape->width) * dst_image_shape->features;

    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);

    CHECK(((RGB_FEATURES == src_image_shape->features) && ((RGB_FEATURES + 1) == dst_image_shape->features)),
        HAILO_INVALID_ARGUMENT,
        "User features must be {}, received {}. HW features must be {}, received {}",
        RGB_FEATURES, src_image_shape->features, RGB_FEATURES + 1, dst_image_shape->features);

    for (uint32_t r = 0; r < src_image_shape->height ; r++) {
        for (uint32_t c = 0; c < src_image_shape->width; c++) {
            src_offset = r * src_image_shape->width * src_image_shape->features + c * src_image_shape->features;
            dst_offset = r * dst_image_shape->width * dst_image_shape->features + c * dst_image_shape->features;

            /* Copy while flipping the data feature-wise */
            for (uint32_t f = 0; f < src_image_shape->features; f++) {
                dst_ptr[dst_offset + f] = src_ptr[src_offset + src_image_shape->features - f - 1];
            }
            /* add another zero byte */
            dst_ptr[dst_offset + RGB_FEATURES] = 0;
        }
        /* move dst_offset 4 features (RGB + 1 zero byte) and pad width if needed */
        // dst_offset still points at the last pixel of the row, so the padding starts
        // right after that pixel's 4 features
        memset(dst_ptr + dst_offset + RGB_FEATURES + 1, 0, pad_size * sizeof(T));
    }

    return HAILO_SUCCESS;
}
+
/* Host-to-device reorder: NCHW (user, planar) -> NHCW (hw). Copies each channel row
 * into its per-row channel slot and zero-pads the width up to the hw width (which
 * must be HW_DATA_ALIGNMENT aligned). */
template<typename T>
hailo_status transform__h2d_NCHW_to_NHCW(
    const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
{
    /* Validate arguments */
    ASSERT(NULL != src_ptr);
    ASSERT(NULL != dst_ptr);
    CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
        "NCHW_to_NHCW Transform features src/dst should be the same");
    CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
        "NCHW_to_NHCW Transform height src/dst should be the same");
    CHECK(src_image_shape->width <= dst_image_shape->width, HAILO_INVALID_ARGUMENT,
        "NCHW_to_NHCW Transform src width should be smaller/equal than dst width");
    CHECK((dst_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
        "NCHW_to_NHCW Transform dst width must be aligned to {}", HW_DATA_ALIGNMENT);

    size_t width_size = src_image_shape->width;
    size_t pad_size = (dst_image_shape->width - src_image_shape->width);
    for (uint32_t c = 0; c < src_image_shape->features; c++) {
        for (uint32_t r = 0; r < src_image_shape->height; r++) {
            // Copy width: source is planar (channel-major), destination is row-major
            // with one width-span per channel inside each row
            const T *src = src_ptr +
                src_image_shape->width * src_image_shape->height * c +
                src_image_shape->width * r;
            T *dst = dst_ptr +
                dst_image_shape->features * dst_image_shape->width * r +
                dst_image_shape->width * c;

            std::copy_n(src, width_size, dst);
            if (pad_size != 0) {
                std::fill_n(dst + width_size, pad_size, static_cast<T>(0));
            }
        }
    }

    return HAILO_SUCCESS;
}
+
+template<typename T>
+hailo_status transform__d2h_NHCW_to_NCHW(
+ const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+ T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+ CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
+ "NCHW_to_NHCW Transform features src/dst should be the same");
+ CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
+ "NCHW_to_NHCW Transform height src/dst should be the same");
+ CHECK(dst_image_shape->width <= src_image_shape->width, HAILO_INVALID_ARGUMENT,
+ "NCHW_to_NHCW Transform dst width should be smaller/equal than src width");
+ CHECK((src_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+ "NCHW_to_NHCW Transform src width must be aligned to {}", HW_DATA_ALIGNMENT);
+
+ size_t width_size = dst_image_shape->width;
+ for (uint32_t r = 0; r < src_image_shape->height; r++) {
+ for (uint32_t c = 0; c < src_image_shape->features; c++) {
+ // Copy width
+ T *dst = dst_ptr +
+ dst_image_shape->width * dst_image_shape->height * c +
+ dst_image_shape->width * r;
+ const T *src = src_ptr +
+ src_image_shape->features * src_image_shape->width * r +
+ src_image_shape->width * c;
+
+ std::copy_n(src, width_size, dst);
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+template<typename T>
+hailo_status transform__d2h_argmax_NHCW_to_NHW(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape,
+ T *dst_ptr, const hailo_3d_image_shape_t &dst_image_shape)
+{
+ assert(nullptr != src_ptr);
+ assert(nullptr != dst_ptr);
+
+ CHECK(src_image_shape.height == dst_image_shape.height, HAILO_INVALID_OPERATION,
+ "NHCW_to_NHW argmax Transform is supported only when src height ({}) is equal to dst height ({})",
+ src_image_shape.height, dst_image_shape.height);
+ CHECK(src_image_shape.width >= dst_image_shape.width, HAILO_INVALID_OPERATION,
+ "NHCW_to_NHW argmax Transform is supported only when src width ({}) is equal/larger than dst width ({})",
+ src_image_shape.width, dst_image_shape.width);
+ CHECK(dst_image_shape.features == 1, HAILO_INVALID_OPERATION,
+ "NHCW_to_NHW argmax Transform is supported only when dst features ({}) is 1",
+ dst_image_shape.features);
+ CHECK(src_image_shape.features < std::numeric_limits<T>::max(), HAILO_INVALID_OPERATION,
+ "NHCW_to_NHW argmax Transform is supported only when src features ({}) is smaller than {}",
+ src_image_shape.features, std::numeric_limits<T>::max());
+
+ const auto src_row_size = src_image_shape.width * src_image_shape.features;
+ const auto dst_row_size = dst_image_shape.width;
+ for (uint32_t r = 0; r < src_image_shape.height; r++) {
+ // For each row, we iterate on all columns, and find the max feature. It can be implemented better by iteratre
+ // over all features, and on each iteration save the max value for each column.
+ const T *src_row = src_ptr + (r * src_row_size);
+ T *dst_row = dst_ptr + (r * dst_row_size);
+ for (uint32_t w = 0; w < dst_image_shape.width; w++) {
+ const T *offset_in_row = src_row + w;
+ T max_index = 0;
+ T max_value = *offset_in_row;
+
+ for (uint32_t c = 1; c < src_image_shape.features; c++) {
+ offset_in_row += src_image_shape.width;
+ const auto ¤t_value = *offset_in_row;
+ if (current_value > max_value) {
+ max_index = static_cast<T>(c);
+ max_value = current_value;
+ }
+ }
+
+ dst_row[w] = max_index;
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+
+template<typename T>
+hailo_status transform__h2d_YUY2_to_YUY2(const T *src_ptr, T *dst_ptr, uint32_t shape_size)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+ "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
+
+ std::copy_n(src_ptr, shape_size, dst_ptr);
+
+ return HAILO_SUCCESS;
+}
+
+template<typename T>
+hailo_status transform__h2d_RGB4_to_NHWC(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape, T *dst_ptr,
+ const hailo_3d_image_shape_t &dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ const auto row_size = src_image_shape.width * src_image_shape.features;
+ const auto src_row_size = HailoRTCommon::align_to(row_size, RGB4_ALIGNMENT);
+ const auto dst_row_size = dst_image_shape.width * dst_image_shape.features;
+
+ const auto pad_size = (dst_image_shape.width - src_image_shape.width) * dst_image_shape.features;
+
+ uint32_t src_offset = 0;
+ uint32_t dst_offset = 0;
+
+ for (uint32_t r = 0; r < dst_image_shape.height; r++) {
+ src_offset = r * src_row_size;
+ dst_offset = r * dst_row_size;
+ memcpy(dst_ptr + dst_offset, src_ptr + src_offset, src_row_size * sizeof(T));
+ if (pad_size != 0) {
+ std::fill_n(dst_ptr + dst_offset + src_row_size, pad_size, static_cast<T>(0));
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+template<typename T>
+hailo_status transform__h2d_RGB4_to_NHCW(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape, T *dst_ptr,
+ const hailo_3d_image_shape_t &dst_image_shape)
+{
+ /* Validate arguments */
+ ASSERT(NULL != src_ptr);
+ ASSERT(NULL != dst_ptr);
+
+ const auto row_size = src_image_shape.width * src_image_shape.features;
+ const auto src_row_size = HailoRTCommon::align_to(row_size, RGB4_ALIGNMENT);
+ const auto dst_row_size = dst_image_shape.width * dst_image_shape.features;
+
+ const auto pad_size = (dst_image_shape.width - src_image_shape.width) * dst_image_shape.features;
+
+ uint32_t src_offset = 0;
+ uint32_t dst_offset = 0;
+
+ for (uint32_t r = 0; r < src_image_shape.height ; r++) {
+ /* transpose - switch width and channels */
+ for (uint32_t f = 0; f < src_image_shape.features; f++) {
+ for (uint32_t c = 0; c < src_image_shape.width; c++) {
+ src_offset = r * src_row_size + c * src_image_shape.features + f;
+ dst_offset = r * dst_row_size + f * dst_image_shape.width + c;
+ dst_ptr[dst_offset] = src_ptr[src_offset];
+ }
+ /* pad feature to 8 elemnts */
+ if (pad_size != 0) {
+ dst_offset = r * dst_row_size + f * dst_image_shape.width + src_image_shape.width;
+ std::fill_n(dst_ptr + dst_offset, pad_size, static_cast<T>(0));
+ }
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
/* Quantizes the user input buffer (m_src_format type) into quant_buffer using the
 * destination (hw) format type and m_dst_quant_info. Supported conversions are
 * uint8->uint8, uint16->uint8/uint16 and float32->uint8/uint16; any other combination
 * returns HAILO_INVALID_OPERATION. */
hailo_status InputTransformContext::quantize_stream(const void *src_ptr, void *quant_buffer)
{
    auto shape_size = HailoRTCommon::get_shape_size(m_src_image_shape);

    switch (m_src_format.type) {
        case HAILO_FORMAT_TYPE_UINT8:
            if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
                Quantization::quantize_input_buffer<uint8_t, uint8_t>((uint8_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
            }
            else {
                return HAILO_INVALID_OPERATION;
            }
            break;
        case HAILO_FORMAT_TYPE_UINT16:
            if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
                Quantization::quantize_input_buffer<uint16_t, uint8_t>((uint16_t*)src_ptr, (uint8_t *)quant_buffer, shape_size, m_dst_quant_info);
            }
            else if (HAILO_FORMAT_TYPE_UINT16 == m_dst_format.type) {
                Quantization::quantize_input_buffer<uint16_t, uint16_t>((uint16_t*)src_ptr, (uint16_t *)quant_buffer, shape_size, m_dst_quant_info);
            }
            else {
                return HAILO_INVALID_OPERATION;
            }
            break;
        case HAILO_FORMAT_TYPE_FLOAT32:
            if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
                Quantization::quantize_input_buffer<float32_t, uint8_t>((float32_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
            }
            else if (HAILO_FORMAT_TYPE_UINT16 == m_dst_format.type) {
                Quantization::quantize_input_buffer<float32_t, uint16_t>((float32_t*)src_ptr, (uint16_t*)quant_buffer, shape_size, m_dst_quant_info);
            }
            else {
                return HAILO_INVALID_OPERATION;
            }
            break;
        default:
            LOGGER__ERROR("Invalid src-buffer's type format");
            return HAILO_INVALID_ARGUMENT;
    }
    return HAILO_SUCCESS;
}
+
/* De-quantizes the output buffer in place, converting from the hw (m_src_format) type
 * to the user (m_dst_format) type using m_dst_quant_info. For float32 outputs with NHW
 * order (argmax layers) the values are only casted, not rescaled. Unsupported type
 * combinations return HAILO_INVALID_OPERATION. */
hailo_status FrameOutputTransformContext::quantize_stream(const void *dst_ptr)
{
    auto shape_size = HailoRTCommon::get_shape_size(m_dst_image_shape);

    switch (m_dst_format.type) {
        case HAILO_FORMAT_TYPE_UINT8:
            if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
                Quantization::dequantize_output_buffer_in_place<uint8_t, uint8_t>((uint8_t*)dst_ptr, shape_size, m_dst_quant_info);
            }
            else {
                return HAILO_INVALID_OPERATION;
            }
            break;
        case HAILO_FORMAT_TYPE_UINT16:
            if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
                Quantization::dequantize_output_buffer_in_place<uint16_t, uint8_t>((uint16_t*)dst_ptr, shape_size, m_dst_quant_info);
            }
            else if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
                Quantization::dequantize_output_buffer_in_place<uint16_t, uint16_t>((uint16_t*)dst_ptr, shape_size, m_dst_quant_info);
            }
            else {
                return HAILO_INVALID_OPERATION;
            }
            break;
        case HAILO_FORMAT_TYPE_FLOAT32:
            /* if output layer is argmax - do not rescale */
            if (HAILO_FORMAT_ORDER_NHW != m_dst_format.order) {
                if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
                    Quantization::dequantize_output_buffer_in_place<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
                }
                else if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
                    Quantization::dequantize_output_buffer_in_place<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
                }
                else {
                    return HAILO_INVALID_OPERATION;
                }
            } else {
                // Argmax output holds feature indices - cast them to float without rescaling
                if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
                    cast_elements_inplace<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size);
                }
                else if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
                    cast_elements_inplace<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size);
                }
                else {
                    return HAILO_INVALID_OPERATION;
                }
            }
            break;
        default:
            LOGGER__ERROR("Invalid dst-buffer's type format");
            return HAILO_INVALID_ARGUMENT;
    }

    return HAILO_SUCCESS;
}
+
+hailo_status reorder_input_stream(const void *src_ptr, hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
+ void *dst_ptr, hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
+{
+ if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NHWC_to_NHCW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NHWC_to_NHCW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_NC == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NC == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NC_to_NC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NC_to_NC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if (((HAILO_FORMAT_ORDER_FCR == src_format.order) || (HAILO_FORMAT_ORDER_NHWC == src_format.order)) &&
+ (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
+ assert(0 == (dst_image_shape.features % 8));
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_FCR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_FCR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if (((HAILO_FORMAT_ORDER_F8CR == src_format.order) || (HAILO_FORMAT_ORDER_NHWC == src_format.order)) &&
+ (HAILO_FORMAT_ORDER_F8CR == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_F8CR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_F8CR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
+ (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
+ assert(1 == src_image_shape.features);
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == src_format.order) &&
+ (HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == dst_format.order)) {
+ assert(1 == src_image_shape.features);
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
+ (HAILO_FORMAT_ORDER_RGB888 == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ return transform__h2d_NHWC_to_RGB888<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ return transform__h2d_NHWC_to_RGB888<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ }
+
+ if ((HAILO_FORMAT_ORDER_NCHW == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ return transform__h2d_NCHW_to_NHCW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ return transform__h2d_NCHW_to_NHCW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ }
+
+ if ((HAILO_FORMAT_ORDER_YUY2 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_YUY2 == dst_format.order)) {
+ auto shape_size = HailoRTCommon::get_shape_size(src_image_shape);
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ return transform__h2d_YUY2_to_YUY2<uint8_t>((uint8_t*)src_ptr, (uint8_t*)dst_ptr, shape_size);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ return transform__h2d_YUY2_to_YUY2<uint16_t>((uint16_t*)src_ptr, (uint16_t*)dst_ptr, shape_size);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ }
+
+ if (((HAILO_FORMAT_ORDER_NV12 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_HAILO_YYUV) == dst_format.order) ||
+ ((HAILO_FORMAT_ORDER_NV21 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_HAILO_YYVU) == dst_format.order)) {
+ switch (src_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_NV12_to_NV12<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_NV12_to_NV12<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format {}", src_format.type);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if (((HAILO_FORMAT_ORDER_I420 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_HAILO_YYYYUV) == dst_format.order)) {
+ switch (src_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_I420_to_YYYYUV<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_I420_to_YYYYUV<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format {}", src_format.type);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_RGB4 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_RGB4_to_NHWC<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_RGB4_to_NHWC<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ if ((HAILO_FORMAT_ORDER_RGB4 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
+ switch (dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT8:
+ transform__h2d_RGB4_to_NHCW<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
+ break;
+ case HAILO_FORMAT_TYPE_UINT16:
+ transform__h2d_RGB4_to_NHCW<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
+ break;
+ default:
+ LOGGER__ERROR("Invalid src-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ return HAILO_SUCCESS;
+ }
+
+ LOGGER__ERROR("Unsupported input stream transformation from hailo_format_order_t "
+ "{} to hailo_format_order_t {}", src_format.order, dst_format.order);
+ return HAILO_INVALID_OPERATION;
+}
+
/**
 * Reorders a single device-to-host (output) frame from the device-side format order
 * (src_format.order) into the user-side order (dst_format.order).
 * Dispatches on the (src order, dst order) pair, then on the element type
 * (uint8 / uint16) to pick the concrete transform__d2h_* routine.
 *
 * @param src_ptr          Device-ordered input frame.
 * @param src_image_shape  Shape of the device-side frame.
 * @param src_format       Format (order/type/flags) of the device-side frame.
 * @param dst_ptr          [out] User-ordered output frame.
 * @param dst_image_shape  Shape of the user-side frame.
 * @param dst_format       Format of the user-side frame.
 * @return HAILO_SUCCESS on success; HAILO_INVALID_ARGUMENT on an unsupported element
 *         type; HAILO_INVALID_OPERATION on an unsupported order pair.
 */
hailo_status reorder_output_stream(const void *src_ptr, hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
    void *dst_ptr, hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
{
    if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
        (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHCW_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHCW_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    }
    else if ((HAILO_FORMAT_ORDER_NC == src_format.order) &&
        (HAILO_FORMAT_ORDER_NC == dst_format.order)) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NC_to_NC<uint8_t>((uint8_t*)src_ptr, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NC_to_NC<uint16_t>((uint16_t*)src_ptr, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    }
    else if ((HAILO_FORMAT_ORDER_NHW == src_format.order) &&
        (HAILO_FORMAT_ORDER_NHW == dst_format.order)) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHW_to_NHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHW_to_NHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    }
    // FCR is already laid out like NHWC on the host side, so a plain NHWC copy applies.
    else if ((HAILO_FORMAT_ORDER_FCR == src_format.order) &&
        ((HAILO_FORMAT_ORDER_FCR == dst_format.order) || (HAILO_FORMAT_ORDER_NHWC == dst_format.order))) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    }
    else if ((HAILO_FORMAT_ORDER_F8CR == src_format.order) &&
        ((HAILO_FORMAT_ORDER_F8CR == dst_format.order) || (HAILO_FORMAT_ORDER_NHWC == dst_format.order))) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_F8CR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_F8CR<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    }
    else if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
        (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
        // Bayer frames carry a single feature plane on both sides (validated earlier
        // in validate_output_transform_params; asserted here as a debug-build guard).
        assert((1 == src_image_shape.features) && (1 == dst_image_shape.features));
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_BAYER_RGB<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_BAYER_RGB<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    } else if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
        (HAILO_FORMAT_ORDER_NCHW) == dst_format.order) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHCW_to_NCHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHCW_to_NCHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    } else if ((HAILO_FORMAT_ORDER_NHW == src_format.order) &&
        (HAILO_FORMAT_ORDER_NCHW) == dst_format.order) {

        CHECK((src_image_shape.features == 1) && (dst_image_shape.features == 1), HAILO_INVALID_ARGUMENT,
            "Invalid number of features. Expected 1, received hw: {}, user: {}",
            src_image_shape.features, dst_image_shape.features);
        switch (src_format.type) {
            // We call for transform__d2h_NHW_to_NHW function since NCHW is the same as NHW when the image's features = 1.
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHW_to_NHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHW_to_NHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    } else if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
        (HAILO_FORMAT_ORDER_NHW == dst_format.order) &&
        (0 != (HAILO_FORMAT_FLAGS_HOST_ARGMAX & src_format.flags))) {
        // Host-side argmax: collapse the channel axis; the helper returns its own status.
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                return transform__d2h_argmax_NHCW_to_NHW<uint8_t>((uint8_t*)src_ptr, src_image_shape, (uint8_t*)dst_ptr, dst_image_shape);
            case HAILO_FORMAT_TYPE_UINT16:
                return transform__d2h_argmax_NHCW_to_NHW<uint16_t>((uint16_t*)src_ptr, src_image_shape, (uint16_t*)dst_ptr, dst_image_shape);
            default:
                LOGGER__ERROR("Invalid src-buffer's type format");
                return HAILO_INVALID_ARGUMENT;
        }
    } else if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
        (HAILO_FORMAT_ORDER_NHWC) == dst_format.order) {
        switch (src_format.type) {
            case HAILO_FORMAT_TYPE_UINT8:
                transform__d2h_NHWC_to_NHWC<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                break;
            case HAILO_FORMAT_TYPE_UINT16:
                transform__d2h_NHWC_to_NHWC<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                break;
            default:
                LOGGER__ERROR("Invalid src-buffer's type format {}", src_format.type);
                return HAILO_INVALID_ARGUMENT;
        }
    } else {
        LOGGER__ERROR("Unsupported output stream transformation from hailo_format_order_t "
            "{} to hailo_format_order_t {}", HailoRTCommon::get_format_order_str(src_format.order),
            HailoRTCommon::get_format_order_str(dst_format.order));
        return HAILO_INVALID_OPERATION;
    }

    return HAILO_SUCCESS;
}
+
+/* Public funcs */
/* Public funcs */
/**
 * Runs the host-to-device transform pipeline: quantize -> transpose -> reorder.
 * Each enabled step writes into the buffer the next enabled step reads from, so the
 * final enabled step always lands in dst_ptr.
 *
 * @param src_ptr          User frame (read-only; the local pointer is advanced through
 *                         intermediate buffers as steps complete).
 * @param quant_buffer     Scratch buffer for the quantize step output (used only when
 *                         quantize is not the last step).
 * @param dst_ptr          [out] Device-ordered output frame.
 * @param transpose_buffer Scratch buffer for the transpose step; must be non-empty and
 *                         exactly the quantized-src frame size when transpose is enabled.
 */
hailo_status InputTransformContext::transform_inner(const void *src_ptr, void *quant_buffer, void *dst_ptr,
    MemoryView transpose_buffer)
{
    void *orig_dst_ptr = nullptr;
    hailo_3d_image_shape_t transposed_image_shape = m_src_image_shape;
    hailo_format_t quantized_src_format = m_src_format;

    if (!(m_should_quantize || m_should_transpose || m_should_reorder)) {
        /* If transform was created without any actual use - just copy src_ptr to dst_ptr */
        LOGGER__WARN("Transformer was created, but not needed and can be removed. copies src buffer to dst buffer");
        auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
        memcpy(dst_ptr, src_ptr, frame_size);
        return HAILO_SUCCESS;
    }

    if (m_should_quantize) {
        /* If quantize is the final step its output is dst_ptr; otherwise it goes to the
         * scratch quant_buffer for the following steps */
        orig_dst_ptr = (m_should_transpose || m_should_reorder) ? quant_buffer : dst_ptr;
        auto status = quantize_stream(src_ptr, orig_dst_ptr);
        CHECK_SUCCESS(status);
        src_ptr = orig_dst_ptr;
        // After quantization the data carries the device-side element type.
        quantized_src_format.type = m_dst_format.type;
    }

    if (!(m_should_transpose || m_should_reorder)) {
        /* If quantize is the only step - need to copy src buffer to dst buffer */
        auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
        memcpy(dst_ptr, src_ptr, frame_size);
        return HAILO_SUCCESS;
    }

    if (m_should_transpose) {
        if (transpose_buffer.empty()) {
            LOGGER__ERROR("Transpose buffer not given");
            return HAILO_INVALID_ARGUMENT;
        }

        if (transpose_buffer.size() != HailoRTCommon::get_frame_size(m_src_image_shape, quantized_src_format)) {
            LOGGER__ERROR("Transpose buffer size mismatch (expected {}, actual {})",
                HailoRTCommon::get_frame_size(m_src_image_shape, quantized_src_format), transpose_buffer.size());
            return HAILO_INVALID_ARGUMENT;
        }

        /* If transpose is the final step its output is dst_ptr; otherwise use the scratch buffer */
        orig_dst_ptr = (m_should_reorder) ? transpose_buffer.data() : dst_ptr;
        auto status = transform__transpose_buffer(src_ptr, m_src_image_shape, quantized_src_format, orig_dst_ptr);
        CHECK_SUCCESS(status);

        src_ptr = transpose_buffer.data();
        transposed_image_shape = transposed_shape(m_src_image_shape);
    }

    if (m_should_reorder){
        // Reorder is always the last step when enabled - writes directly to dst_ptr.
        auto status = reorder_input_stream(src_ptr, transposed_image_shape, quantized_src_format, dst_ptr,
            m_dst_image_shape, m_dst_format);
        CHECK_SUCCESS(status);
    }

    return HAILO_SUCCESS;
}
+
+hailo_status FrameOutputTransformContext::transform_inner(const void *src_ptr, void *dst_ptr, MemoryView transpose_buffer)
+{
+ hailo_format_t transposed_format = m_dst_format;
+ hailo_3d_image_shape_t transposed_image_shape = m_dst_image_shape;
+ transposed_format.type = m_src_format.type;
+
+ void *orig_dst_ptr = nullptr;
+ void *orig_src_ptr = nullptr;
+
+ if (!(m_should_quantize || m_should_transpose || m_should_reorder)) {
+ /* If transform context was created without any actual use - just copy src_ptr to dst_ptr */
+ LOGGER__WARN("Transform context was created, but not needed and can be removed. copies src buffer to dst buffer");
+ auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
+ memcpy(dst_ptr, src_ptr, frame_size);
+ return HAILO_SUCCESS;
+ }
+
+ if (m_should_reorder) {
+ if (m_should_transpose) {
+ /* If user needs to reorder and transform - the output of the reorder is the transform buffer*/
+ if (transpose_buffer.empty()) {
+ LOGGER__ERROR("Transpose buffer not given");
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ if (transpose_buffer.size() != HailoRTCommon::get_frame_size(m_dst_image_shape, transposed_format)) {
+ LOGGER__ERROR("Transpose buffer size mismatch (expected {}, actual {})",
+ HailoRTCommon::get_frame_size(m_dst_image_shape, transposed_format), transpose_buffer.size());
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ // Prepare transpose - the order transformation will be applied to the transpose buffer, later we will transpose
+ // from dst_ptr (transpose_buffer) to orig_dst_ptr (user buffer)
+ orig_dst_ptr = transpose_buffer.data();
+ transposed_image_shape = transposed_shape(m_dst_image_shape);
+ } else {
+ orig_dst_ptr = dst_ptr;
+ }
+ auto status = reorder_output_stream(src_ptr, m_src_image_shape, m_src_format, orig_dst_ptr, transposed_image_shape,
+ m_dst_format);
+ CHECK_SUCCESS(status);
+ }
+
+ if (m_should_transpose) {
+ orig_src_ptr = (m_should_reorder) ? orig_dst_ptr : const_cast<void *>(src_ptr);
+ auto status = transform__transpose_buffer(orig_src_ptr, transposed_image_shape, transposed_format, dst_ptr);
+ CHECK_SUCCESS(status);
+
+ transposed_image_shape = transposed_shape(transposed_image_shape);
+ }
+
+ if (m_should_quantize) {
+ auto status = quantize_stream(dst_ptr);
+ CHECK_SUCCESS(status);
+ }
+
+ if (!(m_should_transpose || m_should_reorder)) {
+ /* If quantize is the only step - need to copy src buffer to dst buffer */
+ auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
+ memcpy(dst_ptr, src_ptr, frame_size);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+
+hailo_status transform_demux_raw_frame(const void *src, uint32_t offset,
+ hailo_mux_info_t *mux_info, uint32_t mux_row_count)
+{
+ // This is a recursive function with a maximum depth of HailoRTCommon::MUX_INFO_COUNT.
+ hailo_status status = HAILO_UNINITIALIZED;
+ struct hailo_mux_info_t *predecessor = NULL;
+ uint32_t row_size = 0;
+
+ CHECK_ARG_NOT_NULL(src);
+
+ for (uint32_t i = 0; i < mux_row_count; i++) {
+ for (uint32_t j = 0; j < mux_info->successors_count; j++) {
+ predecessor = mux_info->successors[j];
+ row_size = predecessor->row_size;
+
+ if ((predecessor->info.is_mux) && (i < predecessor->rows_gcd)) {
+ status = transform_demux_raw_frame(src, offset, predecessor, predecessor->info.hw_shape.height / mux_info->rows_gcd);
+ CHECK_SUCCESS(status);
+ }
+
+ if (!(predecessor->info.is_mux)) {
+ if (predecessor->row_counter < predecessor->info.shape.height) {
+ memcpy((uint8_t*)predecessor->buffer + predecessor->current_offset, (uint8_t*)src + offset, row_size);
+ predecessor->current_offset += row_size;
+ }
+
+ predecessor->row_counter++;
+ if (predecessor->row_counter == (predecessor->info.hw_shape.height + 1)) {
+ predecessor->row_counter = 0;
+ }
+ }
+
+ offset += row_size;
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status validate_input_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
+ hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
+{
+ /* Check quantize flags - where quantize is no needed */
+ if ((HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
+ LOGGER__ERROR("Cannot dequantize input data");
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ /* Check for overscale transformation*/
+ CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == src_format.type) || (src_format.type >= dst_format.type),
+ HAILO_INVALID_ARGUMENT, "Overscale transformation is not supported");
+
+ /* Check device type */
+ if (!((HAILO_FORMAT_TYPE_UINT16 == dst_format.type) || (HAILO_FORMAT_TYPE_UINT8 == dst_format.type))) {
+ LOGGER__ERROR("Unsupported device-side format_type {}", dst_format.type);
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ /* Check for scaled type without quantization flag*/
+ CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) ||
+ ((src_format.type == dst_format.type) || (hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == src_format.type)),
+ HAILO_INVALID_ARGUMENT, "src-data-type ({}) is bigger than dst-data-type ({}), and must be marked as not quantized",
+ src_format.type, dst_format.type);
+
+ /* Check reorder flags - where no reorder is needed */
+ if ((HAILO_FORMAT_ORDER_FCR == src_format.order) &&
+ (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
+ if (0 != (dst_image_shape.features % 8)) {
+ LOGGER__ERROR("HW features must be aligned to {}. passed hw features - {}",
+ HW_DATA_ALIGNMENT, dst_image_shape.features);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ } else if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
+ (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
+ if (src_image_shape.features != 1) {
+ LOGGER__ERROR("Invalid Bayer user features. Expected 1, received {}", src_image_shape.features);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ } else if ((HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == src_format.order) &&
+ (HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB == dst_format.order)) {
+ if (src_image_shape.features != 1) {
+ LOGGER__ERROR("Invalid Bayer user features. Expected 1, received {}", src_image_shape.features);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ } else if ((HAILO_FORMAT_ORDER_YUY2 == src_format.order) &&
+ (HAILO_FORMAT_ORDER_YUY2 == dst_format.order)) {
+ auto shape_size = HailoRTCommon::get_shape_size(src_image_shape);
+ CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+ "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status validate_output_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
+ hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
+{
+ /* Check quantize flags - where quantize is no needed */
+ if (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
+ LOGGER__ERROR("Cannot quantize output data");
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ /* Check device type */
+ if (!((HAILO_FORMAT_TYPE_UINT16 == src_format.type) || (HAILO_FORMAT_TYPE_UINT8 == src_format.type))) {
+ LOGGER__ERROR("Unsupported device-side format_type {}", dst_format.type);
+ return HAILO_INVALID_ARGUMENT;
+ }
+
+ /* Check for underscale transformation*/
+ CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == dst_format.type) || (src_format.type <= dst_format.type),
+ HAILO_INVALID_ARGUMENT, "Underscale transformation is not supported");
+
+ /* Check for scaled type without quantization flag*/
+ CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags) ||
+ ((src_format.type == dst_format.type) || (hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == dst_format.type)),
+ HAILO_INVALID_ARGUMENT, "dst-data-type ({}) is bigger than src-data-type ({}), and must be marked as not quantized",
+ dst_format.type, src_format.type);
+
+ /* Check reorder flags - where no reorder is needed */
+ if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
+ (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
+ if ((src_image_shape.features != 1) || (dst_image_shape.features != 1)) {
+ LOGGER__ERROR("Invalid Bayer user or hw features. Expected 1, received user: {}, hw: {}",
+ src_image_shape.features, dst_image_shape.features);
+ return HAILO_INVALID_ARGUMENT;
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+bool InputTransformContext::is_transformation_required(
+ const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+ const hailo_quant_info_t &quant_info)
+{
+ auto host_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);
+ return TransformContextUtils::is_transformation_required(HAILO_H2D_STREAM, src_image_shape, host_format,
+ dst_image_shape, dst_format, quant_info);
+}
+
+std::string InputTransformContext::description() const
+{
+ std::stringstream transform_description;
+ bool first = true;
+
+ if (m_should_quantize) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+ }
+
+ if (m_should_transpose) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_transpose_description(m_src_image_shape, transposed_shape(m_src_image_shape));
+ }
+
+ if (m_should_reorder) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_reorder_description(m_src_format.order, m_src_image_shape, m_dst_format.order, m_dst_image_shape);
+ }
+
+ return transform_description.str();
+}
+
/**
 * Creates an input (H2D) transform context for the given user/device shape+format pair.
 * Validates the parameters, resolves HAILO_FORMAT_TYPE_AUTO, decides which pipeline
 * steps are needed and pre-allocates the scratch buffers those steps require.
 *
 * @param src_image_shape  User-side frame shape.
 * @param src_format       User-side format (may contain HAILO_FORMAT_TYPE_AUTO).
 * @param dst_image_shape  Device-side frame shape.
 * @param dst_format       Device-side format.
 * @param dst_quant_info   Quantization parameters for the quantize step.
 * @return The constructed context, or a failure status.
 */
Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
    const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
{
    auto status = validate_input_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
    CHECK_SUCCESS_AS_EXPECTED(status);

    const auto internal_src_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);

    const auto src_frame_size = HailoRTCommon::get_frame_size(src_image_shape, internal_src_format);
    const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, dst_format);

    // Scratch buffer for the quantize step output (zero-initialized), allocated only if needed.
    Buffer quant_buffer;
    bool should_quantize = TransformContextUtils::should_quantize(HAILO_H2D_STREAM, src_format, dst_format,
        dst_quant_info);
    if (should_quantize) {
        auto expected_quant_buffer = Buffer::create(src_frame_size, 0);
        CHECK_EXPECTED(expected_quant_buffer);
        quant_buffer = expected_quant_buffer.release();
    }

    // Scratch buffer for the transpose step, allocated only if needed.
    Buffer transpose_buffer;
    bool should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
    if (should_transpose) {
        auto expected_transpose_buffer = Buffer::create(get_transpose_buffer_size(src_image_shape,
            dst_format.type));
        CHECK_EXPECTED(expected_transpose_buffer);
        transpose_buffer = expected_transpose_buffer.release();
    }

    auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);

    std::unique_ptr<InputTransformContext> transform_context(new (std::nothrow) InputTransformContext(src_frame_size, src_image_shape,
        internal_src_format, dst_frame_size, dst_image_shape, dst_format, dst_quant_info, std::move(quant_buffer),
        std::move(transpose_buffer), should_quantize, should_transpose, should_reorder));
    CHECK_AS_EXPECTED(nullptr != transform_context, HAILO_OUT_OF_HOST_MEMORY);

    return transform_context;
}
+
/// Convenience overload: builds an input context from a stream info, mapping the
/// user buffer format to the src side and the stream's hw shape/format to the dst side.
Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info,
    const hailo_transform_params_t &transform_params)
{
    return create(stream_info.shape, transform_params.user_buffer_format, stream_info.hw_shape, stream_info.format,
        stream_info.quant_info);
}
+
/// Convenience overload: derives default transform params from the quantized flag
/// and the requested user format type, then delegates to the stream-info overload.
Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
    hailo_format_type_t format_type)
{
    return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
}
+
// Member-wise constructor; takes ownership of the pre-allocated scratch buffers.
// NOTE(review): initializer-list order assumes the members are declared in this
// order in the class (buffers last) - confirm to avoid -Wreorder warnings.
InputTransformContext::InputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
    const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer &&quant_buffer,
    Buffer &&transpose_buffer,const bool should_quantize, const bool should_transpose, const bool should_reorder) :
    m_src_frame_size(src_frame_size),
    m_src_image_shape(src_image_shape),
    m_src_format(src_format),
    m_dst_frame_size(dst_frame_size),
    m_dst_image_shape(dst_image_shape),
    m_dst_format(dst_format),
    m_dst_quant_info(dst_quant_info),
    m_should_quantize(should_quantize),
    m_should_transpose(should_transpose),
    m_should_reorder(should_reorder),
    m_quant_buffer(std::move(quant_buffer)),
    m_transpose_buffer(std::move(transpose_buffer))
{}
+
/**
 * Transforms one user frame (src) into one device frame (dst).
 * Validates both buffer sizes against the sizes computed at creation time, then runs
 * the full pipeline via transform_inner() using the pre-allocated scratch buffers.
 */
hailo_status InputTransformContext::transform(const MemoryView src, MemoryView dst)
{
    /* Check sizes */
    CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
        "src size must be {}. passed size - {}", m_src_frame_size, src.size());
    CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
        "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());

    hailo_status status = transform_inner(src.data(),
        quant_buffer().data(), dst.data(), transpose_buffer());
    CHECK_SUCCESS(status);
    return HAILO_SUCCESS;
}
+
/// Returns the expected user-side (src) frame size in bytes.
size_t InputTransformContext::get_src_frame_size() const
{
    return m_src_frame_size;
}
+
/// Returns the expected device-side (dst) frame size in bytes.
size_t InputTransformContext::get_dst_frame_size() const
{
    return m_dst_frame_size;
}
+
+bool OutputTransformContext::is_transformation_required(
+ const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+ const hailo_quant_info_t &quant_info)
+{
+ auto host_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
+ return TransformContextUtils::is_transformation_required(HAILO_D2H_STREAM, src_image_shape, src_format,
+ dst_image_shape, host_format, quant_info);
+}
+
+Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+{
+ auto status = validate_output_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ if (HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order) {
+ return NMSOutputTransformContext::create(src_format, dst_format, dst_quant_info, nms_info);
+ }
+
+ return FrameOutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info);
+}
+
/// Convenience overload: builds an output context from a stream info, mapping the
/// stream's hw shape/format to the src side and the user buffer format to the dst side.
Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info,
    const hailo_transform_params_t &transform_params)
{
    return create(stream_info.hw_shape, stream_info.format, stream_info.shape,
        transform_params.user_buffer_format, stream_info.quant_info, stream_info.nms_info);
}
+
/// Convenience overload: derives default transform params from the quantized flag
/// and the requested user format type, then delegates to the stream-info overload.
Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
    hailo_format_type_t format_type)
{
    return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
}
+
// Base-class member-wise constructor shared by the frame and NMS output contexts.
OutputTransformContext::OutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const bool should_quantize,
    const bool should_transpose, const bool should_reorder) :
    m_src_frame_size(src_frame_size),
    m_src_format(src_format),
    m_dst_frame_size(dst_frame_size),
    m_dst_format(dst_format),
    m_dst_quant_info(dst_quant_info),
    m_should_quantize(should_quantize),
    m_should_transpose(should_transpose),
    m_should_reorder(should_reorder)
{}
+
// Frame-context constructor: forwards the shared fields to the base class and takes
// ownership of the shapes and the pre-allocated transpose scratch buffer.
FrameOutputTransformContext::FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
    const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
    const bool should_quantize, const bool should_transpose, const bool should_reorder) :
    OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize,
        should_transpose, should_reorder), m_src_image_shape(src_image_shape), m_dst_image_shape(dst_image_shape),
    m_transpose_buffer(std::move(transpose_buffer))
{}
+
+Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
+{
+ const auto internal_dst_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
+
+ const auto src_frame_size = HailoRTCommon::get_frame_size(src_image_shape, src_format);
+ const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, internal_dst_format);
+
+ auto should_quantize = TransformContextUtils::should_quantize(HAILO_D2H_STREAM, src_format, dst_format,
+ dst_quant_info);
+
+ Buffer transpose_buffer;
+ auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
+ if (should_transpose) {
+ auto expected_transpose_buffer = Buffer::create(get_transpose_buffer_size(dst_image_shape, src_format.type));
+ CHECK_EXPECTED(expected_transpose_buffer);
+ transpose_buffer = expected_transpose_buffer.release();
+ }
+
+ auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);
+
+ std::unique_ptr<OutputTransformContext> frame_transform_context = std::make_unique<FrameOutputTransformContext>(src_frame_size,
+ src_image_shape, src_format, dst_frame_size, dst_image_shape, internal_dst_format, dst_quant_info, std::move(transpose_buffer),
+ should_quantize, should_transpose, should_reorder);
+
+ CHECK_AS_EXPECTED(nullptr != frame_transform_context, HAILO_OUT_OF_HOST_MEMORY);
+
+ return frame_transform_context;
+}
+
// NMS-context constructor. Passes should_reorder=true to the base class unconditionally
// (NMS output always needs reordering) and zero-initializes one chunk offset per frame chunk.
NMSOutputTransformContext::NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format,
    size_t dst_frame_size, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
    const hailo_nms_info_t &nms_info, Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose) :
    OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize ,should_transpose,
        true), m_nms_info(nms_info), m_chunk_offsets(nms_info.chunks_per_frame, 0), m_quant_buffer(std::move(quant_buffer))
{}
+
+Expected<std::unique_ptr<OutputTransformContext>> NMSOutputTransformContext::create(const hailo_format_t &src_format,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+{
+ // Validate params
+ CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order, HAILO_INVALID_ARGUMENT,
+ "Format order should be HAILO_FORMAT_ORDER_HAILO_NMS");
+
+ const auto internal_dst_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
+
+ CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == internal_dst_format.order, HAILO_INVALID_ARGUMENT,
+ "Format order should be HAILO_FORMAT_ORDER_HAILO_NMS");
+
+ if (internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) {
+ CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type, HAILO_INVALID_ARGUMENT,
+ "Format order HAILO_FORMAT_ORDER_HAILO_NMS without quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16");
+ }
+ else {
+ CHECK_AS_EXPECTED((HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type) || (HAILO_FORMAT_TYPE_FLOAT32 == internal_dst_format.type),
+ HAILO_INVALID_ARGUMENT,
+ "Format order HAILO_FORMAT_ORDER_HAILO_NMS with quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16 or HAILO_FORMAT_TYPE_FLOAT32");
+ }
+
+ const auto src_frame_size = HailoRTCommon::get_nms_hw_frame_size(nms_info);
+ auto dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
+
+ Buffer quant_buffer;
+ const bool should_quantize = (src_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) &&
+ !(internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED);
+ if (should_quantize) {
+ dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
+ auto expected_nms_quant_buffer = Buffer::create(dst_frame_size, 0);
+ CHECK_EXPECTED(expected_nms_quant_buffer);
+ quant_buffer = expected_nms_quant_buffer.release();
+ }
+
+ auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
+
+ std::unique_ptr<OutputTransformContext> nms_transform_context = std::make_unique<NMSOutputTransformContext>(src_frame_size,
+ src_format, dst_frame_size, internal_dst_format, dst_quant_info, nms_info, std::move(quant_buffer),
+ should_quantize, should_transpose);
+ CHECK_AS_EXPECTED(nullptr != nms_transform_context, HAILO_OUT_OF_HOST_MEMORY);
+
+ return nms_transform_context;
+}
+
+hailo_status FrameOutputTransformContext::transform(const MemoryView src, MemoryView dst)
+{
+ /* Check sizes */
+ CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
+ "src size must be {}. passed size - {}", m_src_frame_size, src.size());
+ CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
+ "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());
+
+ auto status = transform_inner(src.data(), dst.data(), MemoryView(m_transpose_buffer));
+ CHECK_SUCCESS(status);
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status NMSOutputTransformContext::transform(const MemoryView src, MemoryView dst)
+{
+ /* Check sizes */
+ CHECK(src.size() == m_src_frame_size, HAILO_INVALID_ARGUMENT,
+ "src size must be {}. passed size - {}", m_src_frame_size, src.size());
+ CHECK(dst.size() == m_dst_frame_size, HAILO_INVALID_ARGUMENT,
+ "dst_size must be {}. passed size - {}", m_dst_frame_size, dst.size());
+
+ assert((HAILO_FORMAT_ORDER_HAILO_NMS == m_src_format.order) && (HAILO_FORMAT_ORDER_HAILO_NMS == m_dst_format.order));
+
+ auto shape_size = HailoRTCommon::get_nms_host_shape_size(m_nms_info);
+
+ if (!(HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
+ LOGGER__ERROR("Cannot quantize output data");
+ return HAILO_INVALID_OPERATION;
+ }
+
+ if ((HAILO_FORMAT_FLAGS_TRANSPOSED & m_src_format.flags) || (HAILO_FORMAT_FLAGS_TRANSPOSED & m_dst_format.flags)) {
+ LOGGER__ERROR("NMS doesn't support transposed format currently");
+ return HAILO_INVALID_OPERATION;
+ }
+
+ if (!((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) &&
+ !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags))) {
+ transform__d2h_NMS((uint8_t*)src.data(), (uint8_t*)dst.data(), m_nms_info, m_chunk_offsets);
+ }
+ else {
+ transform__d2h_NMS((uint8_t*)src.data(), m_quant_buffer.data(), m_nms_info, m_chunk_offsets);
+ }
+
+ if ((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
+ // NMS has to be uint16 or float32
+ switch (m_dst_format.type) {
+ case HAILO_FORMAT_TYPE_UINT16:
+ if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
+ Quantization::dequantize_output_buffer_nms<uint16_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
+ (uint16_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
+ }
+ else {
+ return HAILO_INVALID_OPERATION;
+ }
+ break;
+ case HAILO_FORMAT_TYPE_FLOAT32:
+ if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
+ Quantization::dequantize_output_buffer_nms<float32_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
+ (float32_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
+ }
+ else {
+ return HAILO_INVALID_OPERATION;
+ }
+ break;
+ default:
+ LOGGER__ERROR("Invalid dst-buffer's type format");
+ return HAILO_INVALID_ARGUMENT;
+ }
+ }
+
+ return HAILO_SUCCESS;
+}
+
+std::string FrameOutputTransformContext::description() const
+{
+ std::stringstream transform_description;
+ bool first = true;
+
+ if (m_should_quantize) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+ }
+
+ if (m_should_transpose) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_transpose_description(m_src_image_shape, transposed_shape(m_src_image_shape));
+ }
+
+ if (m_should_reorder) {
+ if (!first) {
+ transform_description << " | ";
+ } else {
+ first = false;
+ }
+ transform_description << TransformContextUtils::make_reorder_description(m_src_format.order, m_src_image_shape, m_dst_format.order, m_dst_image_shape);
+ }
+
+ return transform_description.str();
+}
+
+std::string NMSOutputTransformContext::description() const
+{
+ std::stringstream transform_description;
+
+ transform_description << "number_of_classes: " << m_nms_info.number_of_classes <<
+ ", max_bboxes_per_class: " << m_nms_info.max_bboxes_per_class;
+
+ if (m_should_quantize) {
+ transform_description << " | " <<
+ TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+ }
+
+ return transform_description.str();
+}
+
+size_t OutputTransformContext::get_src_frame_size() const
+{
+ return m_src_frame_size;
+}
+
+size_t OutputTransformContext::get_dst_frame_size() const
+{
+ return m_dst_frame_size;
+}
+
+Expected<std::unique_ptr<OutputDemuxer>> OutputDemuxer::create(OutputStream &output_stream)
+{
+ auto obj = OutputDemuxerBase::create(output_stream.get_frame_size(), output_stream.get_layer_info());
+ CHECK_EXPECTED(obj);
+
+ auto obj_ptr = make_unique_nothrow<OutputDemuxerBase>(obj.release());
+ CHECK_AS_EXPECTED(nullptr != obj_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return Expected<std::unique_ptr<OutputDemuxer>>(std::move(obj_ptr));
+}
+
+Expected<OutputDemuxerBase> OutputDemuxerBase::create(size_t src_frame_size, const LayerInfo &layer_info)
+{
+ // Validate params
+ CHECK_AS_EXPECTED((HAILO_FORMAT_ORDER_HAILO_NMS != layer_info.format.order), HAILO_INVALID_OPERATION,
+ "NMS layer does not support mux.");
+
+ auto mux_infos = get_mux_infos_from_layer_info(layer_info);
+ CHECK_EXPECTED(mux_infos);
+
+ return OutputDemuxerBase(src_frame_size, mux_infos.release());
+}
+
+hailo_status OutputDemuxerBase::get_mux_info_from_layer_info_impl(hailo_mux_info_t &mux_info, const LayerInfo &layer_info,
+ uint32_t &offset, uint32_t height_ratio, std::vector<hailo_mux_info_t> &res, size_t &number_of_mux_infos)
+{
+ // This is a recursive function with a maximum depth of HailoRTCommon::MUX_INFO_COUNT.
+ mux_info.info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+
+ mux_info.row_size = height_ratio * layer_info.hw_shape.width * layer_info.hw_shape.features * layer_info.hw_data_bytes;
+ mux_info.row_counter = 0;
+
+ if (mux_info.info.is_mux) {
+ int i = 0;
+ CHECK(layer_info.predecessor.size() <= HailoRTCommon::MUX_INFO_COUNT, HAILO_INTERNAL_FAILURE, "Too many mux edges");
+ for (auto &pred : layer_info.predecessor) {
+ hailo_mux_info_t successor = {};
+ auto status = get_mux_info_from_layer_info_impl(successor,
+ pred, offset, layer_info.height_ratios[i], res, number_of_mux_infos);
+ CHECK_SUCCESS(status);
+ res.push_back(successor);
+ mux_info.successors[i] = &(res.back());
+ i++;
+ number_of_mux_infos++;
+ }
+ mux_info.successors_count = static_cast<uint32_t>(layer_info.predecessor.size());
+ mux_info.rows_gcd = layer_info.height_gcd;
+ } else {
+ mux_info.offset = offset;
+ offset += mux_info.info.hw_frame_size;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status fuse_buffers(const std::vector<MemoryView> &buffers,
+ const std::vector<hailo_nms_info_t> &infos_of_buffers, MemoryView dst)
+{
+ CHECK_ARG_NOT_NULL(dst.data());
+ CHECK(buffers.size() == infos_of_buffers.size(), HAILO_INVALID_ARGUMENT,
+ "Vectors of buffers and NMS infos does not match!");
+ CHECK(HailoRTCommon::MAX_DEFUSED_LAYER_COUNT >= buffers.size(), HAILO_INVALID_ARGUMENT,
+ "Buffers count is bigger than allowed! ({} > {})", buffers.size(), HailoRTCommon::MAX_DEFUSED_LAYER_COUNT);
+
+    // Order the buffers by their class group index, which specifies in what order they should be fused.
+ auto frames = std::vector<std::pair<const hailo_nms_info_t*, const MemoryView*>>(buffers.size());
+ for (uint32_t i = 0; i < infos_of_buffers.size(); ++i) {
+ frames[infos_of_buffers[i].defuse_info.class_group_index].first = &infos_of_buffers[i];
+ frames[infos_of_buffers[i].defuse_info.class_group_index].second = &buffers[i];
+ }
+
+ uint32_t total_num_of_classes = 0;
+ size_t total_size_of_buffers = 0;
+ for (const auto &frame_pair : frames) {
+ auto &info = *frame_pair.first;
+ auto &buffer = *frame_pair.second;
+ total_num_of_classes += info.number_of_classes * info.chunks_per_frame;
+ total_size_of_buffers += buffer.size();
+ CHECK(buffer.size() == HailoRTCommon::get_nms_hw_frame_size(info), HAILO_INVALID_ARGUMENT,
+ "Source buffer size is not same as NMS HW frame size! ({} != {})", buffer.size(),
+ HailoRTCommon::get_nms_hw_frame_size(info));
+ }
+
+ // Each frame contributes 1 extra bbox_size at the end of it which acts as a delimiter, but we don't copy those to the fused buffer.
+ // We keep the size of the dst buffer 1 bbox_size too big to stay in the format of not defused nms frames.
+ total_size_of_buffers -= (frames.size() - 1) * frames[0].first->bbox_size;
+
+ CHECK(dst.size() == total_size_of_buffers, HAILO_INVALID_ARGUMENT,
+ "Size of destination buffer is not same as the expected size of the fused frame! (size: {}, expected: {})",
+ dst.size(), total_size_of_buffers);
+
+ uint32_t offsets[HailoRTCommon::MAX_DEFUSED_LAYER_COUNT] = {0};
+ uint32_t dst_offset = 0;
+ for (uint32_t i = 0; i < total_num_of_classes; i++) {
+ size_t buff_index = (i % frames.size());
+ auto &info = *frames[buff_index].first;
+ auto &buffer = *frames[buff_index].second;
+
+ const uint8_t *src_ptr = buffer.data();
+ // TODO: Maybe change asserts to checks
+ assert(offsets[buff_index] + sizeof(nms_bbox_counter_t) <= buffer.size());
+ nms_bbox_counter_t bbox_count = *reinterpret_cast<const nms_bbox_counter_t*>(src_ptr + offsets[buff_index]);
+ uint32_t copy_size = static_cast<uint32_t>(sizeof(bbox_count) + bbox_count * info.bbox_size);
+ assert(offsets[buff_index] + copy_size <= buffer.size());
+ assert(dst_offset + copy_size <= dst.size());
+ std::copy_n(src_ptr + offsets[buff_index], copy_size, dst.data() + dst_offset);
+ offsets[buff_index] += copy_size;
+ dst_offset += copy_size;
+ }
+
+ return HAILO_SUCCESS;
+}
+
+Expected<std::vector<hailo_mux_info_t>> OutputDemuxerBase::get_mux_infos_from_layer_info(const LayerInfo &layer_info)
+{
+ // Setting the first mux
+ std::vector<hailo_mux_info_t> res;
+ res.reserve(HailoRTCommon::MUX_INFO_COUNT);
+ res.push_back({});
+ uint32_t offset = 0;
+ uint32_t height_ratio = 0;
+ size_t number_of_mux_infos = 1;
+
+ auto status = get_mux_info_from_layer_info_impl(res[0], layer_info, offset, height_ratio, res, number_of_mux_infos);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+ res.resize(number_of_mux_infos);
+
+ return res;
+}
+
+OutputDemuxerBase::OutputDemuxerBase(size_t src_frame_size, std::vector<hailo_mux_info_t> &&mux_infos) :
+ OutputDemuxer(src_frame_size),
+ m_mux_infos(std::move(mux_infos)) {}
+
+hailo_status OutputDemuxerBase::transform_demux(const MemoryView src, std::vector<MemoryView> &raw_buffers)
+{
+ size_t raw_buffer_index = 0;
+ size_t total_mux_sizes = 0;
+
+ CHECK(raw_buffers.size() == get_edges_stream_info().size(), HAILO_INVALID_ARGUMENT,
+ "There is a missmatch between mux edges counts ({}) and raw_buffers_size ({})", get_edges_stream_info().size(),
+ raw_buffers.size());
+
+ // Reset the runtime offset
+ for (auto &mux_edge : m_mux_infos) {
+ if (!mux_edge.info.is_mux) {
+ mux_edge.buffer = (void*)((uintptr_t)raw_buffers[raw_buffer_index].data());
+ mux_edge.current_offset = 0;
+ mux_edge.row_counter = 0;
+ CHECK((mux_edge.info.hw_frame_size == raw_buffers[raw_buffer_index].size()), HAILO_INVALID_ARGUMENT,
+ "Expected buffer size of {}, got {}", mux_edge.info.hw_frame_size, raw_buffers[raw_buffer_index].size());
+ total_mux_sizes += mux_edge.info.hw_frame_size;
+ raw_buffer_index++;
+ }
+ }
+ CHECK(total_mux_sizes == src.size(), HAILO_INVALID_ARGUMENT,
+ "src_size must be: {}, passed_size: {}", total_mux_sizes, src.size());
+
+ // TODO: Optimization - Read directly to user raw buffers (in case of NO_TRANSFORM, INPLACE_TRANSFORM)
+
+ auto first_mux_info = m_mux_infos[0];
+ return transform_demux_raw_frame(src.data(), 0, &first_mux_info, first_mux_info.rows_gcd);
+}
+
+hailo_status OutputDemuxerBase::transform_demux(const MemoryView src, const std::map<std::string, MemoryView> &dst_ptrs)
+{
+ size_t total_mux_sizes = 0;
+ // Reset the runtime offset
+ for (auto &mux_edge : m_mux_infos) {
+ if (!mux_edge.info.is_mux) {
+ auto name = std::string(mux_edge.info.name);
+ CHECK(contains(dst_ptrs, name), HAILO_INVALID_ARGUMENT, "edge name {} is not in dst_ptrs", name);
+ mux_edge.buffer = const_cast<void*>(reinterpret_cast<const void*>((dst_ptrs.at(name)).data()));
+ mux_edge.current_offset = 0;
+ mux_edge.row_counter = 0;
+ CHECK((mux_edge.info.hw_frame_size == (dst_ptrs.at(name)).size()), HAILO_INVALID_ARGUMENT,
+ "Expected buffer size of {}, got {}", mux_edge.info.hw_frame_size, (dst_ptrs.at(name)).size());
+ total_mux_sizes += mux_edge.info.hw_frame_size;
+ }
+ }
+ CHECK(total_mux_sizes == src.size(), HAILO_INVALID_ARGUMENT, "src_size must be: {}, passed_size: {}",
+ total_mux_sizes, src.size());
+
+ auto first_mux_info = m_mux_infos[0];
+ return transform_demux_raw_frame(src.data(), 0, &first_mux_info, first_mux_info.rows_gcd);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file transform_internal.hpp
+ * @brief Pre/post infer transformations
+ **/
+
+#ifndef _TRANSFORM_INTERNAL_HPP_
+#define _TRANSFORM_INTERNAL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/hailort_common.hpp"
+#include "hailo/buffer.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/transform.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "hef/layer_info.hpp"
+
+#include <map>
+#include <vector>
+
+
+namespace hailort
+{
+
+class HAILORTAPI TransformContextUtils final
+{
+public:
+ static bool is_transformation_required(const hailo_stream_direction_t stream_direction,
+ const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
+ static bool should_quantize(const hailo_stream_direction_t stream_direction,
+ const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
+ static bool should_transpose(const hailo_format_flags_t &src_flags, const hailo_format_flags_t &dst_flags);
+ static bool should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+ const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format);
+ static std::string make_quantization_description(hailo_format_type_t src_type, hailo_format_type_t dst_type,
+ hailo_quant_info_t quant_info);
+ static std::string make_reorder_description(hailo_format_order_t src_order, hailo_3d_image_shape_t src_shape,
+ hailo_format_order_t dst_order, hailo_3d_image_shape_t dst_shape);
+ static std::string make_transpose_description(hailo_3d_image_shape_t original_shape, hailo_3d_image_shape_t transposed_shape);
+};
+
+class OutputDemuxerBase : public OutputDemuxer {
+public:
+ static Expected<OutputDemuxerBase> create(size_t src_frame_size, const LayerInfo &layer_info);
+
+ virtual std::vector<hailo_stream_info_t> get_edges_stream_info() override
+ {
+ std::vector<hailo_stream_info_t> res;
+ for (auto &info : m_mux_infos) {
+ if (!info.info.is_mux) {
+ res.push_back(info.info);
+ }
+ }
+ return res;
+ }
+
+ virtual hailo_status transform_demux(const MemoryView src, const std::map<std::string, MemoryView> &dst_ptrs) override;
+ virtual hailo_status transform_demux(const MemoryView src, std::vector<MemoryView> &raw_buffers) override;
+
+private:
+ OutputDemuxerBase(size_t src_frame_size, std::vector<hailo_mux_info_t> &&mux_infos);
+
+ static Expected<std::vector<hailo_mux_info_t>> get_mux_infos_from_layer_info(const LayerInfo &layer_info);
+ static hailo_status get_mux_info_from_layer_info_impl(hailo_mux_info_t &mux_info, const LayerInfo &layer_info,
+ uint32_t &offset, uint32_t height_ratio, std::vector<hailo_mux_info_t> &res, size_t &number_of_mux_infos);
+
+ std::vector<hailo_mux_info_t> m_mux_infos;
+};
+
+class HAILORTAPI FrameOutputTransformContext final : public OutputTransformContext
+{
+public:
+ static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info);
+
+ FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
+ const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
+ const bool should_quantize, const bool should_transpose, const bool should_reorder);
+
+ hailo_status transform_inner(const void *src_ptr, void *dst_ptr, MemoryView transpose_buffer);
+
+ hailo_status quantize_stream(const void *dst_ptr);
+
+
+ virtual hailo_status transform(const MemoryView src, MemoryView dst) override;
+ virtual std::string description() const override;
+
+private:
+ const hailo_3d_image_shape_t m_src_image_shape;
+ const hailo_3d_image_shape_t m_dst_image_shape;
+ Buffer m_transpose_buffer;
+};
+
+class HAILORTAPI NMSOutputTransformContext final : public OutputTransformContext
+{
+public:
+ static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_format_t &src_format,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info);
+
+ NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
+ const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
+ Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose);
+
+ virtual hailo_status transform(const MemoryView src, MemoryView dst) override;
+ virtual std::string description() const override;
+
+private:
+
+ const hailo_nms_info_t m_nms_info;
+
+    // For each chunk, holds the offset of the current nms class. Kept as a member to avoid run-time allocations
+ std::vector<size_t> m_chunk_offsets;
+ Buffer m_quant_buffer;
+};
+
+} /* namespace hailort */
+
+#endif /* _TRANSFORM_INTERNAL_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file transform_internal.hpp
- * @brief Pre/post infer transformations
- **/
-
-#ifndef _TRANSFORM_INTERNAL_HPP_
-#define _TRANSFORM_INTERNAL_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/hailort_common.hpp"
-#include "hailo/buffer.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/transform.hpp"
-#include "stream_internal.hpp"
-#include "layer_info.hpp"
-
-#include <map>
-#include <vector>
-
-namespace hailort
-{
-
-class HAILORTAPI TransformContextUtils final
-{
-public:
- static bool is_transformation_required(const hailo_stream_direction_t stream_direction,
- const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
- static bool should_quantize(const hailo_stream_direction_t stream_direction,
- const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
- static bool should_transpose(const hailo_format_flags_t &src_flags, const hailo_format_flags_t &dst_flags);
- static bool should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format);
- static std::string make_quantization_description(hailo_format_type_t src_type, hailo_format_type_t dst_type,
- hailo_quant_info_t quant_info);
- static std::string make_reorder_description(hailo_format_order_t src_order, hailo_3d_image_shape_t src_shape,
- hailo_format_order_t dst_order, hailo_3d_image_shape_t dst_shape);
- static std::string make_transpose_description(hailo_3d_image_shape_t original_shape, hailo_3d_image_shape_t transposed_shape);
-};
-
-class OutputDemuxerBase : public OutputDemuxer {
-public:
- static Expected<OutputDemuxerBase> create(size_t src_frame_size, const LayerInfo &layer_info);
-
- virtual std::vector<hailo_stream_info_t> get_edges_stream_info() override
- {
- std::vector<hailo_stream_info_t> res;
- for (auto &info : m_mux_infos) {
- if (!info.info.is_mux) {
- res.push_back(info.info);
- }
- }
- return res;
- }
-
- virtual hailo_status transform_demux(const MemoryView src, const std::map<std::string, MemoryView> &dst_ptrs) override;
- virtual hailo_status transform_demux(const MemoryView src, std::vector<MemoryView> &raw_buffers) override;
-
-private:
- OutputDemuxerBase(size_t src_frame_size, std::vector<hailo_mux_info_t> &&mux_infos);
-
- static Expected<std::vector<hailo_mux_info_t>> get_mux_infos_from_layer_info(const LayerInfo &layer_info);
- static hailo_status get_mux_info_from_layer_info_impl(hailo_mux_info_t &mux_info, const LayerInfo &layer_info,
- uint32_t &offset, uint32_t height_ratio, std::vector<hailo_mux_info_t> &res, size_t &number_of_mux_infos);
-
- std::vector<hailo_mux_info_t> m_mux_infos;
-};
-
-class HAILORTAPI FrameOutputTransformContext final : public OutputTransformContext
-{
-public:
- static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info);
-
- FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
- const bool should_quantize, const bool should_transpose, const bool should_reorder);
-
- hailo_status transform_inner(const void *src_ptr, void *dst_ptr, MemoryView transpose_buffer);
-
- hailo_status quantize_stream(const void *dst_ptr);
-
-
- virtual hailo_status transform(const MemoryView src, MemoryView dst) override;
- virtual std::string description() const override;
-
-private:
- const hailo_3d_image_shape_t m_src_image_shape;
- const hailo_3d_image_shape_t m_dst_image_shape;
- Buffer m_transpose_buffer;
-};
-
-class HAILORTAPI NMSOutputTransformContext final : public OutputTransformContext
-{
-public:
- static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_format_t &src_format,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info);
-
- NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
- const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
- Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose);
-
- virtual hailo_status transform(const MemoryView src, MemoryView dst) override;
- virtual std::string description() const override;
-
-private:
-
- const hailo_nms_info_t m_nms_info;
-
- // For each chunk contains offset of current nms class. Used here in order to avoid run-time allocations
- std::vector<size_t> m_chunk_offsets;
- Buffer m_quant_buffer;
-};
-
-} /* namespace hailort */
-
-#endif /* _TRANSFORM_INTERNAL_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file udp.cpp
- * @brief Socket wrapper for Unix
- **/
-
-#include <stdint.h>
-#include <errno.h>
-#include <string.h>
-
-#include <hailo/hailort.h>
-#include "common/utils.hpp"
-#include "common/logger_macros.hpp"
-#include "udp.hpp"
-#include "common/socket.hpp"
-#include "control_protocol.hpp"
-
-namespace hailort
-{
-
-#define MILLISECONDS_IN_SECOND (1000)
-#define MICROSECONDS_IN_MILLISECOND (1000)
-
-//initialize with padding
-uint8_t g_padded_buffer[MAX_UDP_PAYLOAD_SIZE] = {0,};
-
-hailo_status Udp::bind(struct in_addr host_ip, uint16_t host_port)
-{
- m_host_address.sin_family = AF_INET;
- m_host_address.sin_port = htons(host_port);
- m_host_address.sin_addr = host_ip;
- m_host_address_length = sizeof(m_host_address);
-
- /* Bind the socket */
- auto status = m_socket.socket_bind((struct sockaddr*)&(m_host_address), m_host_address_length);
- CHECK_SUCCESS(status);
-
- /* Save binded host address information */
- return m_socket.get_sock_name((struct sockaddr*)&(m_host_address), &m_host_address_length);
-}
-
-Expected<Udp> Udp::create(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip,
- uint16_t host_port)
-{
- auto status = HAILO_UNINITIALIZED;
- auto socket = Socket::create(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
- CHECK_EXPECTED(socket);
- auto object = Udp(device_ip, device_port, host_ip, host_port, socket.release(), status);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return object;
-}
-
-Udp::Udp(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip, uint16_t host_port,
- Socket &&socket, hailo_status &status) : m_socket(std::move(socket))
-{
- m_device_address.sin_family = AF_INET;
- m_device_address.sin_port = htons(device_port);
- m_device_address.sin_addr = device_ip;
- m_device_address_length = sizeof(m_device_address);
-
- /* Adjust socket rcv buff size */
- status = m_socket.set_recv_buffer_size_max();
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- /* Set default value timeout */
- status = set_timeout(std::chrono::milliseconds(HAILO_DEFAULT_ETH_SCAN_TIMEOUT_MS));
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- /* Set deafult max number of retries */
- status = set_max_number_of_attempts(HAILO_DEFAULT_ETH_MAX_NUMBER_OF_RETRIES);
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- /* If device address is 255.255.255.255 (broadcast), enable broadcast */
- if (INADDR_BROADCAST == m_device_address.sin_addr.s_addr) {
- status = m_socket.enable_broadcast();
- if (HAILO_SUCCESS != status) {
- return;
- }
- }
-
- /* Bind socket at the host */
- status = bind(host_ip, host_port);
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-hailo_status Udp::set_timeout(const std::chrono::milliseconds timeout_ms)
-{
- return m_socket.set_timeout(timeout_ms, &(m_timeout));
-}
-
-hailo_status Udp::send(uint8_t *buffer, size_t *size, bool use_padding, size_t max_payload_size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- size_t number_of_sent_bytes = 0;
- uint8_t *send_ptr = buffer;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(buffer);
- CHECK_ARG_NOT_NULL(size);
-
- if (use_padding) {
- if (*size > (max_payload_size - PADDING_BYTES_SIZE - PADDING_ALIGN_BYTES)) {
- *size = (max_payload_size - PADDING_BYTES_SIZE - PADDING_ALIGN_BYTES);
- }
- /*copy the data to the padded buffer and adjust the size*/
- memcpy((g_padded_buffer + PADDING_BYTES_SIZE), buffer, *size);
- send_ptr = g_padded_buffer;
- *size += PADDING_BYTES_SIZE;
- }
- else if (*size > max_payload_size) {
- *size = max_payload_size;
- }
-
- status = m_socket.send_to((const uint8_t*)send_ptr, *size, MSG_CONFIRM, (const struct sockaddr *) &m_device_address,
- m_device_address_length, &number_of_sent_bytes);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Socket send_to was aborted!");
- return status;
- }
- CHECK_SUCCESS(status);
-
- /*if we had to pad, omit the padding when returning the number of bytes*/
- if (use_padding) {
- number_of_sent_bytes -= PADDING_BYTES_SIZE;
- }
-
- /* number_of_sent_bytes will be positive because of the validation above */
- *size = (size_t)number_of_sent_bytes;
-
- return HAILO_SUCCESS;
-}
-
-hailo_status Udp::recv(uint8_t *buffer, size_t *size)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- size_t number_of_received_bytes = 0;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(buffer);
- CHECK_ARG_NOT_NULL(size);
-
- if (*size > MAX_UDP_PAYLOAD_SIZE) {
- *size = MAX_UDP_PAYLOAD_SIZE;
- }
-
- status = m_socket.recv_from(buffer, *size, 0, (struct sockaddr *) &m_device_address, m_device_address_length,
- &number_of_received_bytes);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Socket recv_from was aborted!");
- return status;
- }
- CHECK_SUCCESS(status);
-
- *size = number_of_received_bytes;
- return HAILO_SUCCESS;
-}
-
-hailo_status Udp::abort()
-{
- return m_socket.abort();
-}
-
-hailo_status Udp::has_data(bool log_timeouts_in_debug)
-{
- return m_socket.has_data((struct sockaddr *) &m_device_address, m_device_address_length, log_timeouts_in_debug);
-}
-
/**
 * Receives packets until one carrying 'expected_sequence' arrives, or until
 * m_max_number_of_attempts packets with a wrong sequence have been discarded.
 * On success *size holds the length of the matching response.
 */
hailo_status Udp::receive_fw_response(uint8_t *buffer, size_t *size, uint32_t expected_sequence)
{
    hailo_status status = HAILO_UNINITIALIZED;
    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;

    size_t receive_attempts = 0;
    uint32_t received_sequence = 0;

    ASSERT(NULL != buffer);
    ASSERT(NULL != size);

    for (receive_attempts = 0; receive_attempts < m_max_number_of_attempts; receive_attempts++) {
        /* Receive a single packet */
        status = recv(buffer, size);
        CHECK_SUCCESS(status);

        /* Get the sequence from the buffer */
        common_status = CONTROL_PROTOCOL__get_sequence_from_response_buffer(buffer, *size, &received_sequence);
        status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
        CHECK_SUCCESS(status);

        if (received_sequence == expected_sequence) {
            /* Received the expected response */
            break;
        } else {
            /* Invalid response was received */
            LOGGER__WARNING("Invalid sequence received (received {}, expected {}). Discarding it.", received_sequence,
                expected_sequence);
            continue;
        }
    }
    /* Loop exhausted only via 'continue' => every received packet had a wrong sequence */
    CHECK((receive_attempts < m_max_number_of_attempts), HAILO_ETH_FAILURE,
        "Received a response with an invalid sequence for {} time.", receive_attempts);

    return HAILO_SUCCESS;
}
-
-
-hailo_status Udp::fw_interact_impl(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
- size_t *response_size, uint32_t expected_sequence)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- size_t expected_request_size = request_size;
- /* If the response_size value is 0, we do not expect response from the fw */
- bool expecting_response = (0 != *response_size);
-
- ASSERT(NULL != request_buffer);
- ASSERT(NULL != response_buffer);
- ASSERT(NULL != response_size);
-
- status = send(request_buffer, &request_size, false, MAX_UDP_PAYLOAD_SIZE);
- CHECK_SUCCESS(status);
-
- /* Validate all bytes were actually sent */
- CHECK(expected_request_size == request_size, HAILO_ETH_FAILURE,
- "Did not send all data at UDP__fw_interact. Expected to send: {}, actually sent: {}", expected_request_size,
- request_size);
-
- status = receive_fw_response(response_buffer, response_size, expected_sequence);
- if ((HAILO_TIMEOUT == status) && !expecting_response) {
- // This timeout was predictable
- status = HAILO_SUCCESS;
- }
- return status;
-}
-
-hailo_status Udp::fw_interact(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
- size_t *response_size, uint32_t expected_sequence)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- /* Validate arguments */
- CHECK_ARG_NOT_NULL(request_buffer);
- CHECK_ARG_NOT_NULL(response_buffer);
- CHECK_ARG_NOT_NULL(response_size);
-
- /* Not clearing the read socket before, because the FW ignores duplicated controls,
- so a leftover control response in the read socket is not possible */
-
- for (size_t attempt_number = 0; attempt_number < m_max_number_of_attempts; ++attempt_number) {
- status = fw_interact_impl(request_buffer, request_size, response_buffer, response_size, expected_sequence);
- if ((HAILO_ETH_RECV_FAILURE == status) || (HAILO_ETH_SEND_FAILURE == status) || (HAILO_TIMEOUT == status)) {
- LOGGER__WARN("Control response was not received, sending it again. Attempt number: {} (zero indexed)",
- attempt_number);
- continue;
- }
- CHECK_SUCCESS(status);
- /* Not validating amount of received bytes because we can not know how many bytes are expected */
- break;
- }
-
- return HAILO_SUCCESS;
-}
-
/// Sets the retry budget used by fw_interact()/receive_fw_response().
/// @param max_number_of_attempts must be strictly positive.
hailo_status Udp::set_max_number_of_attempts(uint8_t max_number_of_attempts)
{
    /* Validate arguments */
    CHECK(0 < max_number_of_attempts, HAILO_INVALID_ARGUMENT,
        "Invalid max_number_of_attempts attempt to be set. max_number_of_attempts cannot be 0.");

    m_max_number_of_attempts = max_number_of_attempts;

    return HAILO_SUCCESS;

}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file udp.hpp
- * @brief Defines udp transport method.
- **/
-
-#ifndef __OS_UDP_H__
-#define __OS_UDP_H__
-
-#include "common/socket.hpp"
-
-#include <hailo/hailort.h>
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-
-typedef struct sockaddr_in UDP__sockaddr_in_t;
-typedef struct timeval UDP__timeout_t;
-
/// UDP transport to a Hailo device: raw send/recv plus the firmware
/// control request/response protocol with sequence matching and retries.
class Udp final {
public:
    static Expected<Udp> create(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip,
        uint16_t host_port);

    hailo_status set_timeout(const std::chrono::milliseconds timeout_ms);
    hailo_status send(uint8_t *buffer, size_t *size, bool use_padding, size_t max_payload_size);
    hailo_status recv(uint8_t *buffer, size_t *size);
    hailo_status abort();
    hailo_status has_data(bool log_timeouts_in_debug = false);
    hailo_status fw_interact(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
        size_t *response_size, uint32_t expected_sequence);
    hailo_status set_max_number_of_attempts(uint8_t max_number_of_attempts);

    // Bound (host) and peer (device) socket addresses, kept public so callers
    // can inspect the endpoints of an established transport.
    UDP__sockaddr_in_t m_host_address;
    socklen_t m_host_address_length;
    UDP__sockaddr_in_t m_device_address;
    socklen_t m_device_address_length;
    UDP__timeout_t m_timeout;

private:
    Udp(struct in_addr device_ip, uint16_t device_port, struct in_addr host_ip, uint16_t host_port,
        Socket &&socket, hailo_status &status);

    hailo_status bind(struct in_addr host_ip, uint16_t host_port);
    hailo_status receive_fw_response(uint8_t *buffer, size_t *size, uint32_t expected_sequence);
    hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size, uint8_t *response_buffer,
        size_t *response_size, uint32_t expected_sequence);

    // Retry budget shared by the fw-interaction helpers (see set_max_number_of_attempts).
    uint8_t m_max_number_of_attempts;
    Socket m_socket;
};
-
-} /* namespace hailort */
-
-#endif /* __OS_UDP_H__ */
--- /dev/null
cmake_minimum_required(VERSION 3.0.0)

# Source files of the utils component.
set(SRC_FILES
    ${CMAKE_CURRENT_SOURCE_DIR}/hailort_common.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/hailort_logger.cpp

    ${CMAKE_CURRENT_SOURCE_DIR}/buffer.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/sensor_config_utils.cpp
)

# The scheduler profiler is compiled only when explicitly enabled.
if(HAILO_BUILD_PROFILER)
    add_subdirectory(profiler)
endif()

# Propagate the accumulated source list to the parent scope.
set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file buffer.cpp
+ * @brief TODO: brief
+ *
+ * TODO: doc
+ **/
+
+#include "hailo/buffer.hpp"
+#include "common/logger_macros.hpp"
+#include "common/utils.hpp"
+#include "common/string_utils.hpp"
+
+#include <algorithm>
+#include <string>
+#include <cstring>
+#include <iostream>
+#include <iomanip>
+
+namespace hailort
+{
+
// Pretty-prints 'size' bytes of 'buffer' as a hex dump: one line per 32 bytes,
// each prefixed by a zero-padded 32-bit offset, followed by a "[size = N]" trailer.
static void format_buffer(std::ostream& stream, const uint8_t *buffer, size_t size)
{
    assert(nullptr != buffer);

    static const bool UPPERCASE = true;
    static const size_t BYTES_PER_LINE = 32;
    static const char *BYTE_DELIM = " ";
    for (size_t offset = 0; offset < size; offset += BYTES_PER_LINE) {
        // Last line may be shorter than BYTES_PER_LINE
        const size_t line_size = std::min(BYTES_PER_LINE, size - offset);
        stream << fmt::format("0x{:08X}", offset) << BYTE_DELIM; // 32 bit offset into a buffer should be enough
        stream << StringUtils::to_hex_string(buffer + offset, line_size, UPPERCASE, BYTE_DELIM) << std::endl;
    }
    stream << "[size = " << std::dec << size << "]";
}
+
// Default constructor: an empty buffer (no storage, zero size).
Buffer::Buffer() :
    m_data(nullptr),
    m_size(0)
{}

// Move constructor: steals the storage and leaves 'other' empty (size reset to 0).
Buffer::Buffer(Buffer&& other) :
    m_data(std::move(other.m_data)),
    m_size(std::exchange(other.m_size, 0))
{}
+
+Expected<Buffer> Buffer::create(size_t size)
+{
+ std::unique_ptr<uint8_t[]> data(new (std::nothrow) uint8_t[size]);
+ if (data == nullptr) {
+ LOGGER__ERROR("Failed allocating {} bytes", size);
+ return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
+ }
+
+ return Buffer(std::move(data), size);
+}
+
+Expected<Buffer> Buffer::create(size_t size, uint8_t default_value)
+{
+ auto buffer = create(size);
+ CHECK_EXPECTED(buffer);
+ std::memset(static_cast<void*>(buffer->m_data.get()), default_value, size);
+ return buffer;
+}
+
// Heap-allocates a Buffer of 'size' bytes and wraps it in a shared_ptr.
// Returns HAILO_OUT_OF_HOST_MEMORY if either allocation fails.
Expected<BufferPtr> Buffer::create_shared(size_t size)
{
    auto buffer = Buffer::create(size);
    CHECK_EXPECTED(buffer);
    auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
    CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
    return buffer_ptr;
}

// Same as create_shared(size), but every byte is initialized to 'default_value'.
Expected<BufferPtr> Buffer::create_shared(size_t size, uint8_t default_value)
{
    auto buffer = Buffer::create(size, default_value);
    CHECK_EXPECTED(buffer);
    auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
    CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
    return buffer_ptr;
}
+
// Allocates a new Buffer and copies 'size' bytes from 'src' into it.
// NOTE(review): 'src' is not validated — a null 'src' with size > 0 is undefined
// behavior in memcpy; confirm callers always pass a valid pointer.
Expected<Buffer> Buffer::create(const uint8_t *src, size_t size)
{
    auto buffer = create(size);
    CHECK_EXPECTED(buffer);
    std::memcpy(static_cast<void*>(buffer->m_data.get()), static_cast<const void*>(src), size);
    return buffer;
}
+
+Expected<Buffer> Buffer::create(std::initializer_list<uint8_t> init)
+{
+ auto buffer = create(init.size());
+ CHECK_EXPECTED(buffer);
+ size_t index = 0;
+ for (const auto& n : init) {
+ // Hackzzz
+ buffer->m_data[index++] = n;
+ }
+
+ return buffer;
+}
+
// Deep-copies this buffer into a newly allocated one.
Expected<Buffer> Buffer::copy() const
{
    return Buffer::create(m_data.get(), m_size);
}
+
+Buffer& Buffer::operator=(Buffer&& other)
+{
+ m_data = std::move(other.m_data);
+ m_size = std::exchange(other.m_size, 0);
+ return *this;
+}
+
+bool Buffer::operator==(const Buffer& rhs) const
+{
+ if (m_size != rhs.m_size) {
+ return false;
+ }
+ return (0 == std::memcmp(data(), rhs.data(), m_size));
+}
+
+bool Buffer::operator!=(const Buffer& rhs) const
+{
+ if (m_size != rhs.m_size) {
+ return true;
+ }
+ return (0 != std::memcmp(data(), rhs.data(), m_size));
+}
+
// Element access; bounds are checked only by assert in debug builds.
uint8_t& Buffer::operator[](size_t pos)
{
    assert(pos < m_size);
    return m_data[pos];
}

const uint8_t& Buffer::operator[](size_t pos) const
{
    assert(pos < m_size);
    return m_data[pos];
}

// Byte iterators over [data(), data() + size()).
Buffer::iterator Buffer::begin()
{
    return iterator(data());
}

Buffer::iterator Buffer::end()
{
    return iterator(data() + m_size);
}

// Raw pointer to the underlying storage (null for an empty buffer).
uint8_t* Buffer::data() noexcept
{
    return m_data.get();
}

const uint8_t* Buffer::data() const noexcept
{
    return m_data.get();
}

// Number of bytes owned by this buffer.
size_t Buffer::size() const noexcept
{
    return m_size;
}

// Releases ownership of the storage to the caller (who becomes responsible
// for delete[]-ing it) and leaves this buffer empty.
uint8_t* Buffer::release() noexcept
{
    m_size = 0;
    return m_data.release();
}
+
+std::string Buffer::to_string() const
+{
+ for (size_t i = 0; i < m_size; i++) {
+ if (m_data[i] == 0) {
+ // We'll return a string that ends at the first null in the buffer
+ return std::string(reinterpret_cast<const char*>(m_data.get()));
+ }
+ }
+
+ return std::string(reinterpret_cast<const char*>(m_data.get()), m_size);
+}
+
// Note: This is a friend function
// Streams a hex dump of the buffer (see format_buffer above).
std::ostream& operator<<(std::ostream& stream, const Buffer& buffer)
{
    format_buffer(stream, buffer.data(), buffer.size());
    return stream;
}
+
// Reinterpret the start of the buffer as an integer of the given width.
// NOTE(review): presumably as_type<T>() validates that the buffer is large
// enough — its implementation is not visible here; confirm before relying on it.
uint16_t Buffer::as_uint16() const
{
    return as_type<uint16_t>();
}

uint32_t Buffer::as_uint32() const
{
    return as_type<uint32_t>();
}

uint64_t Buffer::as_uint64() const
{
    return as_type<uint64_t>();
}

// Mutable variants: allow writing through the reinterpreted reference.
uint16_t& Buffer::as_uint16()
{
    return as_type<uint16_t>();
}

uint32_t& Buffer::as_uint32()
{
    return as_type<uint32_t>();
}

uint64_t& Buffer::as_uint64()
{
    return as_type<uint64_t>();
}

// Private: takes ownership of pre-allocated storage; used by the create() factories.
Buffer::Buffer(std::unique_ptr<uint8_t[]> data, size_t size) :
    m_data(std::move(data)),
    m_size(size)
    {}
+
// Default: a null view (no data, zero size).
MemoryView::MemoryView() :
    m_data(nullptr),
    m_size(0)
{}

// Non-owning view over an entire Buffer; the Buffer must outlive the view.
MemoryView::MemoryView(Buffer &buffer) :
    m_data(buffer.data()),
    m_size(buffer.size())
{}

// Non-owning view over arbitrary memory; the memory must outlive the view.
MemoryView::MemoryView(void *data, size_t size) :
    m_data(data),
    m_size(size)
{}
+
+const MemoryView MemoryView::create_const(const void *data, size_t size)
+{
+ return std::move(MemoryView(const_cast<void *>(data), size));
+}
+
// Raw pointer to the viewed memory (not owned by this object).
uint8_t* MemoryView::data() noexcept
{
    return reinterpret_cast<uint8_t*>(m_data);
}

const uint8_t* MemoryView::data() const noexcept
{
    return reinterpret_cast<const uint8_t*>(m_data);
}

// Number of bytes covered by the view.
size_t MemoryView::size() const noexcept
{
    return m_size;
}

// NOTE(review): "empty" is defined by a null data pointer — a non-null view of
// size 0 is not considered empty here; confirm this asymmetry is intentional.
bool MemoryView::empty() const noexcept
{
    return (m_data == nullptr);
}

// Note: This is a friend function
// Streams a hex dump of the viewed memory (see format_buffer above).
std::ostream& operator<<(std::ostream& stream, const MemoryView& buffer)
{
    format_buffer(stream, buffer.data(), buffer.size());
    return stream;
}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file event.hpp
+ * @brief Event and Semaphore wrapper objects used for multithreading
+ **/
+
+#ifndef _EVENT_INTERNAL_HPP_
+#define _EVENT_INTERNAL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include <memory>
+#include <vector>
+#include <array>
+#include <chrono>
+#if defined(__GNUC__)
+#include <poll.h>
+#endif
+
+namespace hailort
+{
+
+// TODO: Replace with a static wait_multiple func belonging to Waitable (SDK-16567).
+// Will get a vector of pointers as an argument. Can also use variadic
+// template args for cases with fixed number Waitables
// TODO: Replace with a static wait_multiple func belonging to Waitable (SDK-16567).
// Will get a vector of pointers as an argument. Can also use variadic
// template args for cases with fixed number Waitables
//
// Couples a waitable with a shutdown event so a blocking wait can be
// interrupted when the owner is being torn down.
class WaitOrShutdown final
{
public:
    WaitOrShutdown(WaitablePtr waitable, EventPtr shutdown_event);
    ~WaitOrShutdown() = default;

    WaitOrShutdown(const WaitOrShutdown &other) = delete;
    WaitOrShutdown &operator=(const WaitOrShutdown &other) = delete;
    WaitOrShutdown(WaitOrShutdown &&other) noexcept = default;
    WaitOrShutdown &operator=(WaitOrShutdown &&other) = delete;

    // Waits on waitable or shutdown_event to be signaled:
    // * If shutdown_event is signaled:
    //   - shutdown_event is not reset
    //   - HAILO_SHUTDOWN_EVENT_SIGNALED is returned
    // * If waitable is signaled:
    //   - waitable is reset if waitable->is_auto_reset()
    //   - HAILO_SUCCESS is returned
    // * If both waitable and shutdown_event are signaled:
    //   - shutdown_event is not reset
    //   - waitable is not reset
    //   - HAILO_SHUTDOWN_EVENT_SIGNALED is returned
    // * If neither are signaled, then HAILO_TIMEOUT is returned
    // * On any failure an appropriate status shall be returned
    hailo_status wait(std::chrono::milliseconds timeout);
    // NOTE(review): presumably signals the wrapped waitable (unblocking wait());
    // the implementation is not visible here — confirm against the .cpp.
    hailo_status signal();

private:
    // Note: We want to guarantee that if the shutdown event is signaled, HAILO_SHUTDOWN_EVENT_SIGNALED will be
    // returned.
    // * In Unix, using poll this isn't a problem since we'll get all the readable fds in a single call.
    // * In Windows, using WaitForMultipleObjects, this works differently (from msdn):
    //   If bWaitAll is FALSE, the return value minus WAIT_OBJECT_0 indicates the lpHandles array index
    //   of the object that satisfied the wait. If more than one object became signaled during the call,
    //   this is the array index of the signaled object with the smallest index value of all the signaled
    //   objects.
    //   (https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitformultipleobjects)
    // * Hence, SHUTDOWN_INDEX must come before WAITABLE_INDEX!
    static const size_t SHUTDOWN_INDEX = 0;
    static const size_t WAITABLE_INDEX = 1;
    #if defined(_MSC_VER) || defined(__QNX__)
    using WaitHandleArray = std::array<underlying_waitable_handle_t, 2>;
    #else
    using WaitHandleArray = std::array<struct pollfd, 2>;
    #endif

    const WaitablePtr m_waitable;
    const EventPtr m_shutdown_event;
    WaitHandleArray m_wait_handle_array;

    static WaitHandleArray create_wait_handle_array(WaitablePtr waitable, EventPtr shutdown_event);
};
+
+} /* namespace hailort */
+
+#endif /* _EVENT_INTERNAL_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_common.cpp
+ * @brief Implementation of common hailort utilities
+ **/
+
+#include "hailo/hailort_common.hpp"
+#include "common/utils.hpp"
+
+namespace hailort
+{
+
// Needed for the linker
// Out-of-class definitions for static constexpr members that are odr-used
// (required before C++17 inline variables).
const uint32_t HailoRTCommon::BBOX_PARAMS;
const uint32_t HailoRTCommon::MAX_DEFUSED_LAYER_COUNT;
const size_t HailoRTCommon::HW_DATA_ALIGNMENT;
const uint64_t HailoRTCommon::NMS_DELIMITER;
const uint64_t HailoRTCommon::NMS_DUMMY_DELIMITER;
+
// Converts a textual device id into the fixed-size hailo_device_id_t struct.
// Fails with HAILO_INTERNAL_FAILURE if the string does not fit (including the
// NUL terminator).
Expected<hailo_device_id_t> HailoRTCommon::to_device_id(const std::string &device_id)
{
    hailo_device_id_t id = {};
    static constexpr size_t id_size = ARRAY_ENTRIES(id.id);

    CHECK_AS_EXPECTED(device_id.size() < id_size, HAILO_INTERNAL_FAILURE,
        "Device '{}' has a too long id (max is {})", device_id, id_size);

    // Explicit NUL-termination guards against strncpy's no-terminator edge case.
    strncpy(id.id, device_id.c_str(), id_size - 1);
    id.id[id_size - 1] = 0;
    return id;
}
+
+Expected<std::vector<hailo_device_id_t>> HailoRTCommon::to_device_ids_vector(const std::vector<std::string> &device_ids_str)
+{
+ std::vector<hailo_device_id_t> device_ids_vector;
+ device_ids_vector.reserve(device_ids_str.size());
+ for (const auto &device_id_str : device_ids_str) {
+ auto device_id_struct = to_device_id(device_id_str);
+ CHECK_EXPECTED(device_id_struct);
+ device_ids_vector.push_back(device_id_struct.release());
+ }
+ return device_ids_vector;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file hailort_logger.cpp
+ * @brief Implements logger used by hailort.
+ **/
+
+#include "common/utils.hpp"
+#include "common/filesystem.hpp"
+
+#include "utils/hailort_logger.hpp"
+
+#include <spdlog/sinks/basic_file_sink.h>
+#include <spdlog/sinks/rotating_file_sink.h>
+#include <spdlog/sinks/stdout_color_sinks.h>
+#include <spdlog/sinks/android_sink.h>
+#include <spdlog/sinks/null_sink.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <iostream>
+#include <iomanip>
+#ifdef _WIN32
+#include <shlwapi.h>
+#include <shlobj.h>
+#endif
+
+
+namespace hailort
+{
+
+#define MAX_LOG_FILE_SIZE (1024 * 1024) // 1MB
+
+#define HAILORT_NAME ("HailoRT")
+#define HAILORT_LOGGER_FILENAME ("hailort.log")
+#define HAILORT_MAX_NUMBER_OF_LOG_FILES (1) // There will be 2 log files - 1 spare
+#ifdef NDEBUG
+#define HAILORT_CONSOLE_LOGGER_PATTERN ("[%n] [%^%l%$] %v") // Console logger will print: [hailort] [log level] msg
+#else
+#define HAILORT_CONSOLE_LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%P] [%t] [%n] [%^%l%$] [%s:%#] [%!] %v") // Console logger will print: [timestamp] [PID] [TID] [hailort] [log level] [source file:line number] [function name] msg
+#endif
+#define HAILORT_MAIN_FILE_LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%P] [%t] [%n] [%l] [%s:%#] [%!] %v") // File logger will print: [timestamp] [PID] [TID] [hailort] [log level] [source file:line number] [function name] msg
+#define HAILORT_LOCAL_FILE_LOGGER_PATTERN ("[%Y-%m-%d %X.%e] [%t] [%n] [%l] [%s:%#] [%!] %v") // File logger will print: [timestamp] [TID] [hailort] [log level] [source file:line number] [function name] msg
+#define HAILORT_ANDROID_LOGGER_PATTERN ("%v") // Android logger will print only message (additional info are built-in)
+
+#define HAILORT_LOGGER_PATH_ENV_VAR ("HAILORT_LOGGER_PATH")
+
+#ifdef _WIN32
+#define PATH_SEPARATOR "\\"
+#else
+#define PATH_SEPARATOR "/"
+#endif
+
+std::string HailoRTLogger::parse_log_path(const char *log_path)
+{
+ if ((nullptr == log_path) || (std::strlen(log_path) == 0)) {
+ return ".";
+ }
+
+ std::string log_path_str(log_path);
+ if (log_path_str == "NONE") {
+ return "";
+ }
+
+ return log_path_str;
+}
+
// Reads the directory for local logging from the given environment variable
// and normalizes it via parse_log_path().
std::string HailoRTLogger::get_log_path(const std::string &path_env_var)
{
    auto log_path_c_str = std::getenv(path_env_var.c_str());
    return parse_log_path(log_path_c_str);
}
+
// Resolves (and creates, if needed) the centralized log directory:
// %LOCALAPPDATA%\Hailo\HailoRT on Windows, ~/.hailo/hailort elsewhere.
// Returns "" when logging is disabled or a directory cannot be created.
std::string HailoRTLogger::get_main_log_path()
{
    // "" (via HAILORT_LOGGER_PATH=NONE) disables file logging entirely.
    std::string local_log_path = get_log_path(HAILORT_LOGGER_PATH_ENV_VAR);
    if (local_log_path.length() == 0) {
        return "";
    }

#ifdef _WIN32
    // See https://stackoverflow.com/questions/2899013/how-do-i-get-the-application-data-path-in-windows-using-c
    TCHAR local_app_data_path[MAX_PATH];
    auto result = SHGetFolderPath(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, local_app_data_path);
    if (!SUCCEEDED(result)) {
        std::cerr << "Cannot resolve Local Application Data directory path" << std::endl;
        return "";
    }

    const auto hailo_dir_path = std::string(local_app_data_path) + PATH_SEPARATOR + "Hailo";
    const auto full_path = hailo_dir_path + PATH_SEPARATOR + "HailoRT";

#ifdef HAILO_SUPPORT_MULTI_PROCESS
    // The multi-process service logs under the machine-wide ProgramData folder.
    TCHAR program_data_path[MAX_PATH];
    auto ret_val = SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA, NULL, 0, program_data_path);
    if (!SUCCEEDED(ret_val)) {
        std::cerr << "Cannot resolve ProgramData directory path" << std::endl;
        return "";
    }

    const auto hailort_service_dir_path = std::string(program_data_path) + PATH_SEPARATOR + "HailoRT_Service";
    auto create_status = Filesystem::create_directory(hailort_service_dir_path);
    if (HAILO_SUCCESS != create_status) {
        std::cerr << "Cannot create directory at path " << hailort_service_dir_path << std::endl;
        return "";
    }

    const auto hailort_service_full_path = std::string(program_data_path) + PATH_SEPARATOR + "HailoRT_Service" + PATH_SEPARATOR + "logs";
    create_status = Filesystem::create_directory(hailort_service_full_path);
    if (HAILO_SUCCESS != create_status) {
        std::cerr << "Cannot create directory at path " << hailort_service_full_path << std::endl;
        return "";
    }
#endif

#else
    const auto hailo_dir_path = Filesystem::get_home_directory() + PATH_SEPARATOR + ".hailo";
    const auto full_path = hailo_dir_path + PATH_SEPARATOR + "hailort";
#endif

    // Create parent then leaf directory (both are no-ops if they already exist).
    auto status = Filesystem::create_directory(hailo_dir_path);
    if (HAILO_SUCCESS != status) {
        std::cerr << "Cannot create directory at path " << hailo_dir_path << std::endl;
        return "";
    }

    status = Filesystem::create_directory(full_path);
    if (HAILO_SUCCESS != status) {
        std::cerr << "Cannot create directory at path " << full_path << std::endl;
        return "";
    }

    return full_path;
}
+
// Creates a spdlog file sink at dir_path/filename (rotating when 'rotate' is
// true). Falls back to a null sink — logging silently dropped — when the
// directory is "" (logging disabled) or not writable.
std::shared_ptr<spdlog::sinks::sink> HailoRTLogger::create_file_sink(const std::string &dir_path, const std::string &filename, bool rotate)
{
    if ("" == dir_path) {
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    if (!Filesystem::is_path_accesible(dir_path)) {
        std::cerr << "HailoRT warning: Cannot create log file " << filename
                  << "! Please check the directory " << dir_path << " write permissions." << std::endl;
        // Create null sink instead (Will throw away its log)
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    const auto file_path = dir_path + PATH_SEPARATOR + filename;
    if (Filesystem::does_file_exists(file_path) && !Filesystem::is_path_accesible(file_path)) {
        std::cerr << "HailoRT warning: Cannot create log file " << filename
                  << "! Please check the file " << file_path << " write permissions." << std::endl;
        // Create null sink instead (Will throw away its log)
        return make_shared_nothrow<spdlog::sinks::null_sink_st>();
    }

    if (rotate) {
        return make_shared_nothrow<spdlog::sinks::rotating_file_sink_mt>(file_path, MAX_LOG_FILE_SIZE, HAILORT_MAX_NUMBER_OF_LOG_FILES);
    }

    return make_shared_nothrow<spdlog::sinks::basic_file_sink_mt>(file_path);
}
+
// Builds the singleton logger with three sinks: stderr console, the
// centralized ("main") log file, and a local/user-chosen log file.
// NOTE(review): on __ANDROID__ builds m_local_log_file_sink is never
// initialized here, yet it is inserted into sink_list and dereferenced in
// set_levels() below — confirm the Android path actually works.
HailoRTLogger::HailoRTLogger() :
    m_console_sink(make_shared_nothrow<spdlog::sinks::stderr_color_sink_mt>()),
#ifdef __ANDROID__
    m_main_log_file_sink(make_shared_nothrow<spdlog::sinks::android_sink_mt>(HAILORT_NAME))
#else
    m_main_log_file_sink(create_file_sink(get_main_log_path(), HAILORT_LOGGER_FILENAME, true)),
    m_local_log_file_sink(create_file_sink(get_log_path(HAILORT_LOGGER_PATH_ENV_VAR), HAILORT_LOGGER_FILENAME, true))
#endif
{

#ifdef __ANDROID__
    m_main_log_file_sink->set_pattern(HAILORT_ANDROID_LOGGER_PATTERN);
#else
    m_main_log_file_sink->set_pattern(HAILORT_MAIN_FILE_LOGGER_PATTERN);
    m_local_log_file_sink->set_pattern(HAILORT_LOCAL_FILE_LOGGER_PATTERN);
#endif

    // TODO: Handle null pointers for logger and sinks
    m_console_sink->set_pattern(HAILORT_CONSOLE_LOGGER_PATTERN);
    spdlog::sinks_init_list sink_list = { m_console_sink, m_main_log_file_sink, m_local_log_file_sink };
    m_hailort_logger = make_shared_nothrow<spdlog::logger>(HAILORT_NAME, sink_list.begin(), sink_list.end());

    // Release builds log less to console/file than debug builds.
#ifdef NDEBUG
    set_levels(spdlog::level::warn, spdlog::level::info, spdlog::level::warn);
#else
    set_levels(spdlog::level::warn, spdlog::level::debug, spdlog::level::debug);
#endif
    spdlog::set_default_logger(m_hailort_logger);
}
+
// Accessor for the underlying spdlog logger shared by the library.
std::shared_ptr<spdlog::logger> HailoRTLogger::logger()
{
    return m_hailort_logger;
}

// Applies the console level, a common level to both file sinks, and the
// level at which the logger flushes to its sinks.
void HailoRTLogger::set_levels(spdlog::level::level_enum console_level,
    spdlog::level::level_enum file_level, spdlog::level::level_enum flush_level)
{
    m_console_sink->set_level(console_level);
    m_main_log_file_sink->set_level(file_level);
    m_local_log_file_sink->set_level(file_level);
    m_hailort_logger->flush_on(flush_level);
}
+
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file logger_macros.hpp
+ * @brief Declares logger used by hailort.
+ **/
+
+#ifndef _HAILORT_LOGGER_HPP_
+#define _HAILORT_LOGGER_HPP_
+
+
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+
+#include "hailo/hailort.h"
+#include "common/logger_macros.hpp"
+
+namespace hailort
+{
+
/// Process-wide logging singleton wrapping spdlog with console + file sinks.
class HailoRTLogger {
public:
    // Meyers singleton: thread-safe lazy construction on first use.
    static HailoRTLogger& get_instance()
    {
        static HailoRTLogger instance;
        return instance;
    }
    HailoRTLogger(HailoRTLogger const&) = delete;
    void operator=(HailoRTLogger const&) = delete;

    std::shared_ptr<spdlog::logger> logger();
    void set_levels(spdlog::level::level_enum console_level, spdlog::level::level_enum file_level,
        spdlog::level::level_enum flush_level);
    static std::string get_log_path(const std::string &path_env_var);
    static std::string get_main_log_path();
    static std::shared_ptr<spdlog::sinks::sink> create_file_sink(const std::string &dir_path, const std::string &filename, bool rotate);

private:
    HailoRTLogger();
    static std::string parse_log_path(const char *log_path);

    std::shared_ptr<spdlog::sinks::sink> m_console_sink;

    // The main log will written to a centralized directory (home directory)
    // The local log will be written to the local directory or to the path the user has chosen (via $HAILORT_LOGGER_PATH)
    std::shared_ptr<spdlog::sinks::sink> m_main_log_file_sink;
    std::shared_ptr<spdlog::sinks::sink> m_local_log_file_sink;
    std::shared_ptr<spdlog::logger> m_hailort_logger;
};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILORT_LOGGER_HPP_ */
--- /dev/null
cmake_minimum_required(VERSION 3.0.0)

# Scheduler-profiler sources (built only when HAILO_BUILD_PROFILER is set by the parent).
set(SRC_FILES
    ${CMAKE_CURRENT_SOURCE_DIR}/tracer.cpp
)

# Propagate the accumulated source list to the parent scope.
set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file tracer.cpp
+ * @brief: Tracing mechanism for HailoRT + FW events
+ *
+ **/
+
+#include "common/utils.hpp"
+
+#include "utils/hailort_logger.hpp"
+#include "utils/profiler/tracer.hpp"
+
+#include <spdlog/sinks/rotating_file_sink.h>
+#include <spdlog/sinks/stdout_color_sinks.h>
+#include <spdlog/sinks/android_sink.h>
+#include <spdlog/sinks/null_sink.h>
+#include <iomanip>
+#include <sstream>
+
+
+#define SCHEDULER_PROFILER_NAME ("SchedulerProfiler")
+#define SCHEDULER_PROFILER_LOGGER_FILENAME ("scheduler_profiler.json")
+#define SCHEDULER_PROFILER_LOGGER_PATTERN ("%v")
+
+#define SCHEDULER_PROFILER_LOGGER_PATH ("SCHEDULER_PROFILER_LOGGER_PATH")
+
+#define PROFILER_ENV_VAR ("HAILO_ENABLE_PROFILER")
+
+namespace hailort
+{
+
// Enables tracing only when the HAILO_ENABLE_PROFILER env var is exactly "1";
// in that case records the start time and installs the scheduler profiler handler.
Tracer::Tracer()
{
    auto should_trace_env = std::getenv(PROFILER_ENV_VAR);
    // strnlen/strncmp pair accepts only the exact single-character value "1".
    m_should_trace = ((nullptr != should_trace_env) && (strnlen(should_trace_env, 2) == 1) && (strncmp(should_trace_env, "1", 1) == 0));
    if (m_should_trace) {
        m_start_time = std::chrono::high_resolution_clock::now();
        int64_t time_since_epoch = std::chrono::duration_cast<std::chrono::nanoseconds>(m_start_time.time_since_epoch()).count();
        m_handlers.push_back(std::make_unique<SchedulerProfilerHandler>(time_since_epoch));
    }
}
+
// Opens the profiler JSON file sink and writes the document preamble
// (zero-time reference + opening of the "scheduler_actions" array).
SchedulerProfilerHandler::SchedulerProfilerHandler(int64_t &start_time)
#ifndef __ANDROID__
    : m_file_sink(HailoRTLogger::create_file_sink(HailoRTLogger::get_log_path(SCHEDULER_PROFILER_LOGGER_PATH), SCHEDULER_PROFILER_LOGGER_FILENAME, false)),
      m_first_write(true)
#endif
{
#ifndef __ANDROID__
    spdlog::sinks_init_list sink_list = { m_file_sink };
    m_profiler_logger = make_shared_nothrow<spdlog::logger>(SCHEDULER_PROFILER_NAME, sink_list.begin(), sink_list.end());
    m_file_sink->set_level(spdlog::level::level_enum::info);
    m_file_sink->set_pattern(SCHEDULER_PROFILER_LOGGER_PATTERN);
    std::stringstream ss;
    ss << "{\"ns_since_epoch_zero_time\": \"" << start_time << "\",\n\"scheduler_actions\": [\n";
    m_profiler_logger->info(ss.str());
#else
    (void)start_time;
#endif
}
+
// Closes the JSON document opened by the constructor.
// NOTE(review): on __ANDROID__ builds m_profiler_logger is never created in the
// constructor, so this dereference would hit a null shared_ptr — confirm the
// Android build path.
SchedulerProfilerHandler::~SchedulerProfilerHandler()
{
    m_profiler_logger->info("]\n}");
}
+
// Minimal JSON-object holder: maps member names to already-serialized values
// (values must be valid JSON text, e.g. produced by json_to_string below).
struct JSON
{
    std::unordered_map<std::string, std::string> members;
    JSON(const std::initializer_list<std::pair<const std::string, std::string>> &dict) : members{dict} {}
    // Convenience: numeric map is serialized via std::to_string per value.
    JSON(const std::unordered_map<std::string, uint32_t> &dict) {
        for (auto &pair : dict) {
            members.insert({pair.first, std::to_string(pair.second)});
        }
    }
};
+
// Serializes a value to its JSON textual representation.
// Generic overload: arithmetic values via std::to_string.
template<class T>
std::string json_to_string(const T &val) {
    return std::to_string(val);
}

// Strings are emitted quoted, with embedded quotes/backslashes escaped by std::quoted.
template<>
std::string json_to_string(const std::string &val) {
    std::ostringstream quoted_stream;
    quoted_stream << std::quoted(val);
    return quoted_stream.str();
}

// Booleans map to the JSON literals "true"/"false".
template<>
std::string json_to_string(const bool &bool_val) {
    if (bool_val) {
        return "true";
    }
    return "false";
}
+
+template<>
+std::string json_to_string(const JSON &json_val) {
+ std::ostringstream os;
+ os << "{\n";
+ size_t i = 0;
+ for (const auto &kv : json_val.members) {
+ ++i;
+ os << std::quoted(kv.first) << " : ";
+ os << kv.second;
+ if (i != json_val.members.size()) {
+ os << ",\n";
+ }
+ }
+ os << "\n}";
+ return os.str();
+}
+
+bool SchedulerProfilerHandler::comma()
+{
+ auto result = !m_first_write;
+ m_first_write = false;
+ return result;
+}
+
// Appends one JSON record to the profiler file, comma-separated from the previous one.
void SchedulerProfilerHandler::log(JSON json)
{
    m_profiler_logger->info("{}{}", comma() ? ",\n" : "", json_to_string(json));
}
+
+void SchedulerProfilerHandler::handle_trace(const AddCoreOpTrace &trace)
+{
+ log(JSON({
+ {"action", json_to_string(trace.name)},
+ {"timestamp", json_to_string(trace.timestamp)},
+ {"device_id", json_to_string(trace.device_id)},
+ {"core_op_name", json_to_string(trace.core_op_name)},
+ {"core_op_handle", json_to_string(trace.core_op_handle)},
+ {"timeout", json_to_string((uint64_t)trace.timeout)},
+ {"threshold", json_to_string((uint64_t)trace.threshold)}
+ }));
+}
+
+void SchedulerProfilerHandler::handle_trace(const CreateCoreOpInputStreamsTrace &trace)
+{
+ log(JSON({
+ {"action", json_to_string(trace.name)},
+ {"timestamp", json_to_string(trace.timestamp)},
+ {"device_id", json_to_string(trace.device_id)},
+ {"core_op_name", json_to_string(trace.core_op_name)},
+ {"stream_name", json_to_string(trace.stream_name)},
+ {"queue_size", json_to_string(trace.queue_size)}
+ }));
+}
+
+void SchedulerProfilerHandler::handle_trace(const CreateCoreOpOutputStreamsTrace &trace)
+{
+ log(JSON({
+ {"action", json_to_string(trace.name)},
+ {"timestamp", json_to_string(trace.timestamp)},
+ {"device_id", json_to_string(trace.device_id)},
+ {"core_op_name", json_to_string(trace.core_op_name)},
+ {"stream_name", json_to_string(trace.stream_name)},
+ {"queue_size", json_to_string(trace.queue_size)}
+ }));
+}
+
+void SchedulerProfilerHandler::handle_trace(const WriteFrameTrace &trace)
+{
+ log(JSON({
+ {"action", json_to_string(trace.name)},
+ {"timestamp", json_to_string(trace.timestamp)},
+ {"device_id", json_to_string(trace.device_id)},
+ {"core_op_handle", json_to_string(trace.core_op_handle)},
+ {"queue_name", json_to_string(trace.queue_name)}
+ }));
+}
+
// Serializes an InputVdmaDequeueTrace event (frame dequeued towards the device) to the profiler log.
void SchedulerProfilerHandler::handle_trace(const InputVdmaDequeueTrace &trace)
{
    log(JSON({
        {"action", json_to_string(trace.name)},
        {"timestamp", json_to_string(trace.timestamp)},
        {"device_id", json_to_string(trace.device_id)},
        {"core_op_handle", json_to_string(trace.core_op_handle)},
        {"queue_name", json_to_string(trace.queue_name)}
    }));
}
+
// Serializes a ReadFrameTrace event (frame read from an output queue) to the profiler log.
void SchedulerProfilerHandler::handle_trace(const ReadFrameTrace &trace)
{
    log(JSON({
        {"action", json_to_string(trace.name)},
        {"timestamp", json_to_string(trace.timestamp)},
        {"device_id", json_to_string(trace.device_id)},
        {"core_op_handle", json_to_string(trace.core_op_handle)},
        {"queue_name", json_to_string(trace.queue_name)}
    }));
}
+
// Serializes an OutputVdmaEnqueueTrace event (frames enqueued from the device) to the profiler log.
void SchedulerProfilerHandler::handle_trace(const OutputVdmaEnqueueTrace &trace)
{
    log(JSON({
        {"action", json_to_string(trace.name)},
        {"timestamp", json_to_string(trace.timestamp)},
        {"device_id", json_to_string(trace.device_id)},
        {"core_op_handle", json_to_string(trace.core_op_handle)},
        {"queue_name", json_to_string(trace.queue_name)},
        {"frames", json_to_string(trace.frames)}
    }));
}
+
// Serializes a ChooseCoreOpTrace event (scheduler decision) to the profiler log.
// 'threshold'/'timeout' are booleans indicating which condition triggered the choice
// (see the ChooseCoreOpTrace struct); note the JSON key is "chosen_core_op_handle".
void SchedulerProfilerHandler::handle_trace(const ChooseCoreOpTrace &trace)
{
    log(JSON({
        {"action", json_to_string(trace.name)},
        {"timestamp", json_to_string(trace.timestamp)},
        {"device_id", json_to_string(trace.device_id)},
        {"chosen_core_op_handle", json_to_string(trace.core_op_handle)},
        {"threshold", json_to_string(trace.threshold)},
        {"timeout", json_to_string(trace.timeout)},
        {"priority", json_to_string(trace.priority)}
    }));
}
+
// Serializes a SwitchCoreOpTrace event (active core-op switched) to the profiler log.
void SchedulerProfilerHandler::handle_trace(const SwitchCoreOpTrace &trace)
{
    log(JSON({
        {"action", json_to_string(trace.name)},
        {"timestamp", json_to_string(trace.timestamp)},
        {"device_id", json_to_string(trace.device_id)},
        {"core_op_handle", json_to_string(trace.core_op_handle)}
    }));
}
+
+
+}
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file tracer.hpp
+ * @brief Tracing mechanism for HailoRT + FW events
+ **/
+
+#ifndef _HAILO_TRACER_HPP_
+#define _HAILO_TRACER_HPP_
+
#include "hailo/hailort.h"
#include "common/logger_macros.hpp"

#include "vdevice/scheduler/scheduler_base.hpp"

#include <atomic>
#include <chrono>
#include <iomanip>
#include <map>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <utility>
#include <vector>
+
+
+namespace hailort
+{
+
// Base type for all trace events: carries the event name and a timestamp
// (nanoseconds relative to the tracer's start time, filled in by the tracer
// after construction - see Tracer::execute_trace).
struct Trace
{
    // explicit: prevents accidental implicit conversion from std::string to Trace.
    explicit Trace(const std::string &name)
        : name(name)
    {}

    virtual ~Trace() = default;

    uint64_t timestamp = 0; // set by the tracer, not by the event constructor
    std::string name;
};
+
// Event emitted once when tracing is initialized.
struct InitTrace : Trace
{
    InitTrace() : Trace("init") {}
};
+
// Event emitted when a core-op is added to the scheduler.
struct AddCoreOpTrace : Trace
{
    AddCoreOpTrace(const std::string &device_id, const std::string &core_op_name, uint64_t timeout, uint32_t threshold, scheduler_core_op_handle_t handle)
        : Trace("add_core_op"), device_id(device_id), core_op_name(core_op_name), timeout(timeout), threshold(threshold), core_op_handle(handle)
    {}

    std::string device_id;
    std::string core_op_name;
    uint64_t timeout = 0;   // scheduler timeout for this core-op (units not visible here - confirm against caller)
    uint32_t threshold = 0; // scheduler frame threshold for this core-op
    scheduler_core_op_handle_t core_op_handle = INVALID_CORE_OP_HANDLE;
};
+
+struct CreateCoreOpInputStreamsTrace : Trace
+{
+ CreateCoreOpInputStreamsTrace(const std::string &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size)
+ : Trace("create_input_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size)
+ {}
+
+ std::string device_id;
+ std::string core_op_name;
+ std::string stream_name;
+ uint32_t queue_size;
+};
+
+struct CreateCoreOpOutputStreamsTrace : Trace
+{
+ CreateCoreOpOutputStreamsTrace(const std::string &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size)
+ : Trace("create_output_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size)
+ {}
+
+ std::string device_id;
+ std::string core_op_name;
+ std::string stream_name;
+ uint32_t queue_size;
+};
+
+struct WriteFrameTrace : Trace
+{
+ WriteFrameTrace(const std::string &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+ : Trace("write_frame"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+ std::string queue_name;
+};
+
+struct InputVdmaDequeueTrace : Trace
+{
+ InputVdmaDequeueTrace(const std::string &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+ : Trace("input_vdma_dequeue"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+ std::string queue_name;
+};
+
+struct ReadFrameTrace : Trace
+{
+ ReadFrameTrace(const std::string &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+ : Trace("read_frame"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+ std::string queue_name;
+};
+
+struct OutputVdmaEnqueueTrace : Trace
+{
+ OutputVdmaEnqueueTrace(const std::string &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name, uint32_t frames)
+ : Trace("output_vdma_enqueue"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name), frames(frames)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+ std::string queue_name;
+ uint32_t frames = 0;
+};
+
+struct ChooseCoreOpTrace : Trace
+{
+ ChooseCoreOpTrace(const std::string &device_id, scheduler_core_op_handle_t handle, bool threshold, bool timeout, core_op_priority_t priority)
+ : Trace("choose_core_op"), device_id(device_id), core_op_handle(handle), threshold(threshold), timeout(timeout), priority(priority)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+ bool threshold = false;
+ bool timeout = false;
+ core_op_priority_t priority;
+};
+
+struct SwitchCoreOpTrace : Trace
+{
+ SwitchCoreOpTrace(const std::string &device_id, scheduler_core_op_handle_t handle)
+ : Trace("switch_core_op"), device_id(device_id), core_op_handle(handle)
+ {}
+
+ std::string device_id;
+ scheduler_core_op_handle_t core_op_handle;
+};
+
+class Handler
+{
+public:
+ virtual ~Handler() = default;
+
+ virtual void handle_trace(const InitTrace&) {};
+ virtual void handle_trace(const AddCoreOpTrace&) {};
+ virtual void handle_trace(const CreateCoreOpInputStreamsTrace&) {};
+ virtual void handle_trace(const CreateCoreOpOutputStreamsTrace&) {};
+ virtual void handle_trace(const WriteFrameTrace&) {};
+ virtual void handle_trace(const InputVdmaDequeueTrace&) {};
+ virtual void handle_trace(const ReadFrameTrace&) {};
+ virtual void handle_trace(const OutputVdmaEnqueueTrace&) {};
+ virtual void handle_trace(const ChooseCoreOpTrace&) {};
+ virtual void handle_trace(const SwitchCoreOpTrace&) {};
+};
+
+struct JSON;
+
// Handler implementation that serializes scheduler trace events as JSON records
// through a dedicated spdlog logger/file sink. Non-copyable (singleton-style usage).
class SchedulerProfilerHandler : public Handler
{
public:
    SchedulerProfilerHandler(SchedulerProfilerHandler const&) = delete;
    void operator=(SchedulerProfilerHandler const&) = delete;

    // NOTE(review): start_time is taken by non-const reference - presumably written by
    // the constructor with the profiler's epoch; confirm against the implementation.
    SchedulerProfilerHandler(int64_t &start_time);
    ~SchedulerProfilerHandler();

    virtual void handle_trace(const AddCoreOpTrace&) override;
    virtual void handle_trace(const CreateCoreOpInputStreamsTrace&) override;
    virtual void handle_trace(const CreateCoreOpOutputStreamsTrace&) override;
    virtual void handle_trace(const WriteFrameTrace&) override;
    virtual void handle_trace(const InputVdmaDequeueTrace&) override;
    virtual void handle_trace(const ReadFrameTrace&) override;
    virtual void handle_trace(const OutputVdmaEnqueueTrace&) override;
    virtual void handle_trace(const ChooseCoreOpTrace&) override;
    virtual void handle_trace(const SwitchCoreOpTrace&) override;

private:
    // Writes a single JSON record to the profiler log.
    void log(JSON json);
    // Presumably emits the record separator after the first write (see m_first_write) - verify.
    bool comma();

    std::shared_ptr<spdlog::sinks::sink> m_file_sink;
    std::shared_ptr<spdlog::logger> m_profiler_logger;
    std::atomic<bool> m_first_write; // tracks whether any record has been written yet
};
+
+class Tracer
+{
+public:
+ template<class TraceType, typename... Args>
+ static void trace(Args... trace_args)
+ {
+ auto &tracer = get_instance();
+ tracer.execute_trace<TraceType>(trace_args...);
+ }
+
+private:
+ Tracer();
+
+ static Tracer& get_instance()
+ {
+ static Tracer tracer;
+ return tracer;
+ }
+
+ template<class TraceType, typename... Args>
+ void execute_trace(Args... trace_args)
+ {
+ if (!m_should_trace) {
+ return;
+ }
+
+ TraceType trace_struct(trace_args...);
+ auto curr_time = std::chrono::high_resolution_clock::now();
+ trace_struct.timestamp = std::chrono::duration_cast<std::chrono::nanoseconds>(curr_time - this->m_start_time).count();
+ for (auto &handler : this->m_handlers) {
+ handler->handle_trace(trace_struct);
+ }
+ }
+
+ bool m_should_trace = false;
+ std::chrono::high_resolution_clock::time_point m_start_time;
+ std::vector<std::unique_ptr<Handler>> m_handlers;
+};
+
+}
+
+#endif
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file tracer_macros.hpp
+ * @brief Macros for tracing mechanism for HailoRT + FW events
+ **/
+
+#ifndef _HAILO_TRACER_MACROS_HPP_
+#define _HAILO_TRACER_MACROS_HPP_
+
+#if defined HAILO_ENABLE_PROFILER_BUILD
+#include "tracer.hpp"
+#endif
+
+namespace hailort
+{
+
// Sink type: constructible from any argument list, discarding every argument.
// The disabled-profiler variant of the TRACE macro expands to a VoidAll so that
// trace call sites still compile (arguments stay name-checked) without emitting code.
struct VoidAll {
    template<typename... Args>
    VoidAll(Args const& ...)
    {}
};
+
+#if defined HAILO_ENABLE_PROFILER_BUILD
+#define TRACE(type, ...) (Tracer::trace<type>(__VA_ARGS__))
+#else
+#define TRACE(type, ...) {VoidAll temporary_name{__VA_ARGS__};}
+#endif
+
+}
+
+#endif // _HAILO_TRACER_MACROS_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file sensor_config_utils.cpp
+ * @brief Utilities for sensor_config operations
+ **/
+
+#include "common/string_utils.hpp"
+#include "common/utils.hpp"
+
+#include "utils/sensor_config_utils.hpp"
+
+#include <fstream>
+#include <sstream>
+#include <iomanip>
+
+
+namespace hailort
+{
+
+Expected<SENSOR_CONFIG_OPCODES_t> SensorConfigUtils::get_sensor_opcode_by_name(const std::string &name)
+{
+ if (name == "SENSOR_CONFIG_OPCODES_WR") {
+ return SENSOR_CONFIG_OPCODES_WR;
+ }
+ else if (name == "SENSOR_CONFIG_OPCODES_RD") {
+ return SENSOR_CONFIG_OPCODES_RD;
+ }
+ else if (name == "SENSOR_CONFIG_OPCODES_RMW") {
+ return SENSOR_CONFIG_OPCODES_RMW;
+ }
+ else if (name == "SENSOR_CONFIG_OPCODES_DELAY") {
+ return SENSOR_CONFIG_OPCODES_DELAY;
+ }
+ else {
+ LOGGER__ERROR("Failed getting opcode value by name: {}", name);
+ return make_unexpected(HAILO_NOT_FOUND);
+ }
+}
+
+Expected<std::string> SensorConfigUtils::convert_opcode_to_string(uint8_t opcode)
+{
+ switch (opcode) {
+ case SENSOR_CONFIG_OPCODES_WR:
+ return std::string("SENSOR_CONFIG_OPCODES_WR");
+
+ case SENSOR_CONFIG_OPCODES_RD:
+ return std::string("SENSOR_CONFIG_OPCODES_RD");
+
+ case SENSOR_CONFIG_OPCODES_RMW:
+ return std::string("SENSOR_CONFIG_OPCODES_RMW");
+
+ case SENSOR_CONFIG_OPCODES_DELAY:
+ return std::string("SENSOR_CONFIG_OPCODES_DELAY");
+
+ default:
+ LOGGER__ERROR("Failed converting opcode to string");
+ return make_unexpected(HAILO_NOT_FOUND);
+ }
+}
+
// Parses a sensor-config CSV file into a list of operation entries.
// Each line has the format: <opcode_name>,<length>,<page>,<address>,<bitmask>,<value>
// where length is decimal and page/address/bitmask/value are hex. A negative page value
// is mapped to 0xff (matching dump_config_to_csv, which writes -1 for page 0xff).
// Returns HAILO_OPEN_FILE_FAILURE / HAILO_FILE_OPERATION_FAILURE / parse errors via Expected.
Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> SensorConfigUtils::read_config_file(const std::string &config_file_path)
{
    std::ifstream config_file;
    config_file.open(config_file_path, std::ios::in);
    CHECK_AS_EXPECTED(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor config file with errno: {}", errno);

    std::vector<SENSOR_CONFIG__operation_cfg_t> control_buffers;
    std::string line;
    std::string col;

    while(std::getline(config_file, line)) {
        std::stringstream s(line);
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor config file with errno: {}", errno);

        SENSOR_CONFIG__operation_cfg_t config_entry = {};

        // opcode
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file opcode with errno: {}", errno);
        auto opcode = get_sensor_opcode_by_name(col);
        CHECK_EXPECTED(opcode, "Failed getting opcode value");
        config_entry.operation = static_cast<uint8_t>(opcode.value());

        // length (decimal)
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file length with errno: {}", errno);
        auto length = StringUtils::to_uint8(col, 10);
        CHECK_EXPECTED(length);
        config_entry.length = length.value();

        // page (hex); parsed as int32 first so a negative marker can be detected
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file page with errno: {}", errno);
        auto page = StringUtils::to_int32(col, 16);
        CHECK_EXPECTED(page);
        if (0 > page.value()) {
            // Negative page means "no page" - stored as 0xff
            config_entry.page = 0xff;
        } else {
            auto page_uint8 = StringUtils::to_uint8(col, 16);
            CHECK_EXPECTED(page_uint8);
            config_entry.page = page_uint8.value();
        }

        // address (hex)
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file address with errno: {}", errno);
        auto address = StringUtils::to_uint32(col, 16);
        CHECK_EXPECTED(address);
        config_entry.address = address.value();

        // bitmask (hex)
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(s.good(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file bitmask with errno: {}", errno);
        auto value = StringUtils::to_uint32(col, 16);
        CHECK_EXPECTED(bitmask);
        config_entry.bitmask = bitmask.value();

        // value (hex). This is the last column, so getline hits EOF on the stringstream;
        // only bad() (a hard failure) is an error here, unlike the earlier columns.
        std::getline(s, col, ',' );
        CHECK_AS_EXPECTED(!s.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading sensor config file value with errno: {}", errno);
        auto value = StringUtils::to_uint32(col, 16);
        CHECK_EXPECTED(value);
        config_entry.value = value.value();

        control_buffers.emplace_back(config_entry);
    }
    CHECK_AS_EXPECTED(!config_file.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor config file with errno: {}", errno);

    return control_buffers;
}
+
+Expected<SENSOR_CONFIG__operation_cfg_t> SensorConfigUtils::create_config_entry(uint8_t page, uint32_t address, uint8_t length, const std::string &hex_value)
+{
+ auto config_entry_value = StringUtils::to_uint32(hex_value, 16);
+ CHECK_EXPECTED(config_entry_value);
+
+ SENSOR_CONFIG__operation_cfg_t config_entry = {};
+ config_entry.value = config_entry_value.value();
+ config_entry.operation = SENSOR_CONFIG_OPCODES_WR;
+ config_entry.length = length;
+ config_entry.page = page;
+ config_entry.address = address;
+ config_entry.bitmask = 0xFFFF;
+
+ return config_entry;
+}
+
// Parses the two ISP config files (static then runtime) into one list of write entries.
// Line format: a 3-char prefix followed by a hex value (chars [3, 9)):
//   "btp" - sets the current page
//   "bta" - sets the current address
//   "btb"/"bth"/"btw" - write 8/16/32 bits at the current page/address, then advance
//                       the address by 1/2/4 respectively
// Comment lines starting with "//" and empty lines are skipped.
Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> SensorConfigUtils::read_isp_config_file(const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path)
{
    std::vector<std::string> config_files = {isp_static_config_file_path, isp_runtime_config_file_path};
    std::vector<SENSOR_CONFIG__operation_cfg_t> control_buffers;

    for (const auto &config_file_path : config_files) {
        std::ifstream config_file;
        config_file.open(config_file_path, std::ios::in);
        CHECK_AS_EXPECTED(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor ISP config file with errno: {}", errno);

        std::string line;
        // Page/address are stateful: "btp"/"bta" lines set them for subsequent writes.
        uint8_t page = 0;
        uint32_t address = 0;

        while (std::getline(config_file, line)) {
            // Skip whole-line comments and blank lines (any common line-ending form)
            size_t comment_index = line.find("//");
            if (((std::string::npos != comment_index) && (0 == comment_index)) || ("\n" == line) ||
                ("\r\n" == line) || ("\r" == line) || ("" == line)) {
                continue;
            }

            std::string::iterator it = line.begin();
            CHECK_AS_EXPECTED(line.size() >= CONFIG_HEX_VALUE_LAST_CHAR_OFFSET, HAILO_INVALID_ARGUMENT, "Failed processing line {}. The line is not in the expected format. ", line);
            std::string prefix(it, it + CONFIG_PREFIX_LENGTH);
            std::string hex_value(it + CONFIG_PREFIX_LENGTH, it + CONFIG_HEX_VALUE_LAST_CHAR_OFFSET);

            // page
            if ("btp" == prefix) {
                auto page_expected = StringUtils::to_uint8(hex_value, 16);
                CHECK_EXPECTED(page_expected);
                page = page_expected.value();
            }

            // address
            else if ("bta" == prefix) {
                auto address_expected = StringUtils::to_uint32(hex_value, 16);
                CHECK_EXPECTED(address_expected);
                address = address_expected.value();
            }

            // 8-bit write; advances address by 1 byte
            else if ("btb" == prefix) {
                auto config_entry = create_config_entry(page, address, 8, hex_value);
                CHECK_EXPECTED(config_entry);

                control_buffers.emplace_back(config_entry.release());
                address = address + 1;
            }

            // 16-bit write; advances address by 2 bytes
            else if ("bth" == prefix) {
                auto config_entry = create_config_entry(page, address, 16, hex_value);
                CHECK_EXPECTED(config_entry);

                control_buffers.emplace_back(config_entry.release());
                address = address + 2;
            }

            // 32-bit write; advances address by 4 bytes
            else if ("btw" == prefix) {
                auto config_entry = create_config_entry(page, address, 32, hex_value);
                CHECK_EXPECTED(config_entry);

                control_buffers.emplace_back(config_entry.release());
                address = address + 4;
            }

            else {
                LOGGER__ERROR("Invalid configuration prefix: {}", prefix);
                return make_unexpected(HAILO_NOT_FOUND);
            }
        }
        CHECK_AS_EXPECTED(!config_file.bad(), HAILO_FILE_OPERATION_FAILURE, "Failed reading line in sensor ISP config file with errno: {}", errno);
    }

    return control_buffers;
}
+
// Writes 'entries_count' config entries to a CSV file in the format read_config_file parses:
// <opcode_name>,<length>,<page>,<address>,<bitmask>,<value>. Page 0xff is written as -1.
hailo_status SensorConfigUtils::dump_config_to_csv(SENSOR_CONFIG__operation_cfg_t *operation_cfg, const std::string &config_file_path, uint32_t entries_count)
{
    std::ofstream config_file;
    config_file.open(config_file_path, std::ios::out);
    CHECK(config_file.is_open(), HAILO_OPEN_FILE_FAILURE, "Failed opening sensor config file with errno: {}", errno);

    for (size_t i = 0; i < entries_count; i++) {
        SENSOR_CONFIG__operation_cfg_t *config_entry = &operation_cfg[i];

        int page = (config_entry->page == 0xff) ? -1 : config_entry->page;
        // NOTE(review): width is 2 for 8-bit entries and 4 otherwise - 32-bit values would
        // need width 8 to be fully zero-padded; confirm this is intentional.
        int hex_width_filler = (config_entry->length == 8) ? 2 : 4;
        auto opcode_string = convert_opcode_to_string(config_entry->operation);
        CHECK_EXPECTED_AS_STATUS(opcode_string);

        // No need to restore stream flags: they only affect the fstream "config_file",
        // not std::cout or any other stream.
        config_file << std::dec << opcode_string.value() << "," << static_cast<uint32_t>(config_entry->length) << "," << page <<
            ",0x" << std::uppercase << std::hex << std::setfill('0') << std::setw(4) << config_entry->address <<
            ",0x" << std::setfill('0') << std::setw(hex_width_filler) << config_entry->bitmask <<
            ",0x" << std::setfill('0') << std::setw(hex_width_filler) << config_entry->value << std::endl;
    }

    return HAILO_SUCCESS;
}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file sensor_config_utils.hpp
+ * @brief Utilities for sensor_config operations
+ **/
+
+#ifndef _HAILO_SENSOR_CONFIG_UTILS_HPP_
+#define _HAILO_SENSOR_CONFIG_UTILS_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "control_protocol.h"
+
+#include <vector>
+#include <string>
+
+namespace hailort
+{
+
+#define MAX_CONFIG_INFO_ENTRIES (CONTROL_PROTOCOL__MAX_REQUEST_PAYLOAD_SIZE / sizeof(SENSOR_CONFIG__operation_cfg_t))
+#define MAX_CONFIG_ENTRIES_DATA_SIZE (MAX_CONFIG_INFO_ENTRIES * sizeof(SENSOR_CONFIG__operation_cfg_t))
+#define MAX_NON_ISP_SECTIONS (6)
+#define CONFIG_PREFIX_LENGTH (3)
+#define CONFIG_HEX_VALUE_LAST_CHAR_OFFSET (9)
+
+static_assert((MAX_CONFIG_INFO_ENTRIES > 0) ,"MAX_CONFIG_INFO_ENTRIES must be larger than 0");
+
// Static helpers for reading, building and dumping sensor / ISP configuration entries.
class SensorConfigUtils {
public:
    // Maps an opcode name string (e.g. "SENSOR_CONFIG_OPCODES_WR") to its enum value.
    static Expected<SENSOR_CONFIG_OPCODES_t> get_sensor_opcode_by_name(const std::string &name);
    // Parses a CSV sensor config file into a list of operation entries.
    static Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> read_config_file(const std::string &config_file_path);
    // Builds a single write-operation entry from page/address/length and a hex payload string.
    static Expected<SENSOR_CONFIG__operation_cfg_t> create_config_entry(uint8_t page, uint32_t address, uint8_t length, const std::string &hex_value);
    // Parses the static + runtime ISP config files into one list of write entries.
    static Expected<std::vector<SENSOR_CONFIG__operation_cfg_t>> read_isp_config_file(const std::string &isp_static_config_file_path, const std::string &isp_runtime_config_file_path);
    // Inverse of get_sensor_opcode_by_name.
    static Expected<std::string> convert_opcode_to_string(uint8_t opcode);
    // Writes 'entries_count' entries to a CSV file in the format read_config_file parses.
    static hailo_status dump_config_to_csv(SENSOR_CONFIG__operation_cfg_t *operation_cfg, const std::string &config_file_path, uint32_t entries_count);
};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SENSOR_CONFIG_UTILS_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file shared_resource_manager.hpp
+ * @brief holds and manages shared resource objects mapped by a key.
+ *
+ **/
+
+#ifndef HAILO_SHARED_RESOURCE_MANAGER_HPP_
+#define HAILO_SHARED_RESOURCE_MANAGER_HPP_
+
+#include "hailo/expected.hpp"
+#include "common/utils.hpp"
+
+#include <vector>
+#include <unordered_map>
+#include <memory>
+#include <mutex>
+#include <typeinfo>
+
+namespace hailort
+{
+
+#define HAILO_MAX_SHARED_RESOURCES (32)
+#define HAILO_UNIQUE_RESOURCE_KEY (0)
+
// A reference-counted registry entry: the shared resource plus the user key it was
// registered under and the number of live registrations sharing it.
template<class Key, class T>
struct ResourceRef {
    ResourceRef(Key key, std::shared_ptr<T> res)
        : user_key(key), count(0), resource(std::move(res))
    {}

    Key user_key;                // key this resource was registered under
    uint32_t count;              // number of live registrations (managed by the owner)
    std::shared_ptr<T> resource; // the shared resource itself
};
+
+template<class Key, class T>
+class SharedResourceManager
+{
+public:
+ static SharedResourceManager& get_instance()
+ {
+ static SharedResourceManager instance;
+ return instance;
+ }
+
+ Expected<std::shared_ptr<T>> resource_lookup(uint32_t handle)
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ auto resource = m_resources.at(handle)->resource;
+ return resource;
+ }
+
+ template<class CreateFunc>
+ Expected<uint32_t> register_resource(Key user_key, CreateFunc create)
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ uint32_t available_index = static_cast<uint32_t>(m_resources.size());
+ uint32_t match_index = static_cast<uint32_t>(m_resources.size());
+ for (uint32_t i = 0; i < m_resources.size(); ++i) {
+ if (m_resources.at(i) == nullptr) {
+ available_index = i;
+ } else {
+ if (m_resources.at(i)->user_key == user_key) {
+ // Resource already registered
+ match_index = i;
+ break;
+ }
+ }
+ }
+ bool should_create = match_index == m_resources.size() || user_key == unique_key();
+ CHECK_AS_EXPECTED(available_index < m_resources.size() || !should_create, HAILO_NOT_AVAILABLE,
+ "Tried to create more than {} shared resources of type {}", max_resources(), typeid(T).name());
+ if (should_create) {
+ // Create a new resource and register
+ auto expected_resource = create();
+ CHECK_EXPECTED(expected_resource);
+ m_resources.at(available_index) = std::make_shared<ResourceRef<Key, T>>(user_key, expected_resource.release());
+ m_resources.at(available_index)->count++;
+ return available_index;
+ }
+ m_resources.at(match_index)->count++;
+ return match_index;
+ }
+
+ void release_resource(uint32_t handle)
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_resources.at(handle)->count--;
+ if (!m_resources.at(handle)->count) {
+ m_resources.at(handle) = nullptr;
+ }
+ }
+
+private:
+ SharedResourceManager()
+ : m_resources(max_resources(), nullptr)
+ {}
+
+ static uint32_t max_resources()
+ {
+ // This method can be "overriden" with template specialization
+ // to set another MAX for specific managers.
+ return HAILO_MAX_SHARED_RESOURCES;
+ }
+
+ static Key unique_key()
+ {
+ // This method can be "overriden" with template specialization
+ // to set another UNIQUE for specific managers.
+ return HAILO_UNIQUE_RESOURCE_KEY;
+ }
+
+ std::mutex m_mutex;
+ std::vector<std::shared_ptr<ResourceRef<Key, T>>> m_resources;
+};
+
+}
+
+#endif /* HAILO_SHARED_RESOURCE_MANAGER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file thread_safe_map.hpp
+ * @brief Thread safe map
+ **/
+
+#ifndef HAILO_THREAD_SAFE_MAP_HPP_
+#define HAILO_THREAD_SAFE_MAP_HPP_
+
+#include <map>
+#include <mutex>
+
+namespace hailort
+{
+
// A std::map wrapper whose individual operations are serialized by a mutex.
// NOTE: operations returning references or iterators (operator[], at, find, begin, end)
// release the lock before the caller uses the result - callers must provide their own
// synchronization for such compound usage.
template<class K, class V>
class SafeMap {
public:
    SafeMap() : m_map(), m_mutex() {}
    virtual ~SafeMap() = default;

    // Move ctor: locks the source's mutex while stealing its contents. The previous
    // implementation read the source map without locking, racing with concurrent users.
    SafeMap(SafeMap &&map) : m_map(), m_mutex()
    {
        std::lock_guard<std::mutex> lock(map.m_mutex);
        m_map = std::move(map.m_map);
    }

    V& operator[](const K& k) {
        std::lock_guard<std::mutex> lock(m_mutex);
        return m_map[k];
    }

    V& operator[](K&& k) {
        std::lock_guard<std::mutex> lock(m_mutex);
        // Actually move the rvalue key (previously copied despite the && overload)
        return m_map[std::move(k)];
    }

    V& at(K& k) {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.at(k);
    }

    V& at(const K& k) {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.at(k);
    }

    std::size_t size() {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.size();
    }

    typename std::map<K, V>::iterator find(K& k) {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.find(k);
    }

    typename std::map<K, V>::iterator find(const K& k) {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.find(k);
    }

    bool contains(const K &k) {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.find(k) != m_map.end();
    }

    void clear() {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_map.clear();
    }

    typename std::map<K, V>::iterator begin() {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.begin();
    }

    typename std::map<K, V>::iterator end() {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_map.end();
    }

protected:
    std::map<K, V> m_map;
    mutable std::mutex m_mutex;
};
+
+} /* namespace hailort */
+
+#endif // HAILO_THREAD_SAFE_MAP_HPP_
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file thread_safe_queue.hpp
+ * @brief Thread safe queue taken from https://stackoverflow.com/a/16075550
+ **/
+
+#ifndef HAILO_THREAD_SAFE_QUEUE_HPP_
+#define HAILO_THREAD_SAFE_QUEUE_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/event.hpp"
+
+#include "common/utils.hpp"
+#include "common/logger_macros.hpp"
+
+#include "utils/event_internal.hpp"
+
+// Define __unix__ for inclusion of readerwriterqueue.h because readerwriterqueue is implemented over POSIX standards
+// but checks __unix__ - otherwise QNX returns unsupported platform (need HAILO_UNDEF_UNIX_FLAG in order to undefine
+// __unix__ only in case of defining it here)
+#if defined(__QNX__) && !defined(__unix__)
+#define __unix__
+#define HAILO_UNDEF_UNIX_FLAG
+#endif
+
+#include "readerwriterqueue.h"
+
+#if defined(HAILO_UNDEF_UNIX_FLAG)
+#undef __unix__
+#undef HAILO_UNDEF_UNIX_FLAG
+#endif
+
+#include <queue>
+#include <mutex>
+#include <memory>
+#include <condition_variable>
+#include <chrono>
+
+
+namespace hailort
+{
+
#define DEFAULT_TIMEOUT_MS (1000)

// A thread-safe queue - https://stackoverflow.com/a/16075550
// push() never blocks; pop() blocks until an element is available.
template <class T>
class SafeQueue {
public:
    SafeQueue() : m_queue(), m_mutex(), m_queue_not_empty(), m_timeout(DEFAULT_TIMEOUT_MS) {}
    virtual ~SafeQueue() = default;

    // Add an element to the queue.
    virtual void push(T t) {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_queue.push(std::move(t)); // 't' is a by-value sink parameter - move instead of copying
        m_queue_not_empty.notify_one();
    }

    // Get the "front" element.
    // If the queue is empty, block until an element is available.
    virtual T pop() {
        std::unique_lock<std::mutex> lock(m_mutex);
        while (m_queue.empty()) {
            // Release the lock for the duration of the wait and reacquire it afterwards.
            // The bounded wait_for guards against a missed notification; emptiness is
            // rechecked on every wakeup.
            m_queue_not_empty.wait_for(lock, m_timeout);
        }
        T val = std::move(m_queue.front()); // move out - the element is popped right after
        m_queue.pop();
        return val;
    }

protected:
    std::queue<T> m_queue;
    mutable std::mutex m_mutex;
    std::condition_variable m_queue_not_empty;
    const std::chrono::milliseconds m_timeout;
};

// A SafeQueue with bounded capacity: push() blocks while the queue is full.
template <class T>
class SafeQueueMaxSize : public SafeQueue<T> {
public:
    SafeQueueMaxSize(uint32_t max_size) :
        SafeQueue<T>::SafeQueue(),
        m_max_size(max_size),
        m_queue_not_full()
    {}
    virtual ~SafeQueueMaxSize() = default;

    virtual void push(T t) override {
        std::unique_lock<std::mutex> lock(this->m_mutex);
        m_queue_not_full.wait(lock, [&]{ return this->m_queue.size() < m_max_size; });

        this->m_queue.push(std::move(t)); // move the sink parameter, as in the base class
        this->m_queue_not_empty.notify_one();
    }

    virtual T pop() override {
        std::unique_lock<std::mutex> lock(this->m_mutex);
        this->m_queue_not_empty.wait(lock, [&]{ return !this->m_queue.empty(); });

        T val = std::move(this->m_queue.front());
        this->m_queue.pop();

        // Space was just freed - wake a producer blocked in push()
        if (this->m_queue.size() < m_max_size) {
            m_queue_not_full.notify_one();
        }
        return val;
    }

protected:
    const uint32_t m_max_size;
    std::condition_variable m_queue_not_full;
};
+
+// Single-Producer Single-Consumer Queue
+// The queue's size is limited
+template<typename T, size_t MAX_BLOCK_SIZE = 512>
+class SpscQueue
+{
+private:
+ typedef moodycamel::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue;
+
+public:
+ static constexpr auto INIFINITE_TIMEOUT() { return std::chrono::milliseconds(HAILO_INFINITE); }
+
+ SpscQueue(size_t max_size, SemaphorePtr items_enqueued_sema, SemaphorePtr items_dequeued_sema,
+ EventPtr shutdown_event, std::chrono::milliseconds default_timeout) :
+ m_inner(max_size),
+ m_items_enqueued_sema_or_shutdown(items_enqueued_sema, shutdown_event),
+ m_items_enqueued_sema(items_enqueued_sema),
+ m_items_dequeued_sema_or_shutdown(items_dequeued_sema, shutdown_event),
+ m_items_dequeued_sema(items_dequeued_sema),
+ m_default_timeout(default_timeout),
+ m_size(max_size),
+ m_enqueues_count(0),
+ m_callback_mutex()
+ {}
+
+ virtual ~SpscQueue() = default;
+ SpscQueue(SpscQueue &&other) :
+ m_inner(std::move(other.m_inner)),
+ m_items_enqueued_sema_or_shutdown(std::move(other.m_items_enqueued_sema_or_shutdown)),
+ m_items_enqueued_sema(std::move(other.m_items_enqueued_sema)),
+ m_items_dequeued_sema_or_shutdown(std::move(other.m_items_dequeued_sema_or_shutdown)),
+ m_items_dequeued_sema(std::move(other.m_items_dequeued_sema)),
+ m_default_timeout(std::move(other.m_default_timeout)),
+ m_size(std::move(other.m_size)),
+ m_enqueues_count(std::move(other.m_enqueues_count.load())),
+ m_cant_enqueue_callback(std::move(other.m_cant_enqueue_callback)),
+ m_can_enqueue_callback(std::move(other.m_can_enqueue_callback)),
+ m_callback_mutex()
+ {}
+
+ static Expected<SpscQueue> create(size_t max_size, const EventPtr& shutdown_event,
+ std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
+ {
+ if (0 == max_size) {
+ LOGGER__ERROR("Invalid queue max_size (must be greater than zero)");
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+ }
+
+ // * items_enqueued_sema:
+ // +1 for each enqueued item
+ // -1 for each dequeued item
+ // Blocks when there are no items in the queue (hence when the queue is built it starts at zero)
+ // * items_dequeued_sema:
+ // +1 for each dequeued item
+ // -1 for each enqueued item
+ // Blocks when the queue is full (which happens when it's value reaches zero, hence it starts at queue size)
+ const auto items_enqueued_sema = Semaphore::create_shared(0);
+ CHECK_AS_EXPECTED(nullptr != items_enqueued_sema, HAILO_OUT_OF_HOST_MEMORY, "Failed creating items_enqueued_sema semaphore");
+
+ const auto items_dequeued_sema = Semaphore::create_shared(static_cast<uint32_t>(max_size));
+ CHECK_AS_EXPECTED(nullptr != items_dequeued_sema, HAILO_OUT_OF_HOST_MEMORY, "Failed creating items_dequeued_sema semaphore");
+
+ return SpscQueue(max_size, items_enqueued_sema, items_dequeued_sema, shutdown_event, default_timeout);
+ }
+
+ static std::shared_ptr<SpscQueue> create_shared(size_t max_size, const EventPtr& shutdown_event,
+ std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
+ {
+ auto queue = create(max_size, shutdown_event, default_timeout);
+ if (!queue) {
+ LOGGER__ERROR("Failed creating queue. status={}", queue.status());
+ return nullptr;
+ }
+
+ return make_shared_nothrow<SpscQueue>(queue.release());
+ }
+
+ static std::unique_ptr<SpscQueue> create_unique(size_t max_size, const EventPtr& shutdown_event,
+ std::chrono::milliseconds default_timeout = std::chrono::milliseconds(1000))
+ {
+ auto queue = create(max_size, shutdown_event, default_timeout);
+ if (!queue) {
+ LOGGER__ERROR("Failed creating queue. status={}", queue.status());
+ return nullptr;
+ }
+
+ return make_unique_nothrow<SpscQueue>(queue.release());
+ }
+
+ Expected<T> dequeue(std::chrono::milliseconds timeout, bool ignore_shutdown_event = false) AE_NO_TSAN
+ {
+ hailo_status wait_result = HAILO_UNINITIALIZED;
+ if (ignore_shutdown_event) {
+ wait_result = m_items_enqueued_sema->wait(timeout);
+ } else {
+ wait_result = m_items_enqueued_sema_or_shutdown.wait(timeout);
+ }
+
+ if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
+ LOGGER__TRACE("Shutdown event has been signaled");
+ return make_unexpected(wait_result);
+ }
+ if (HAILO_TIMEOUT == wait_result) {
+ LOGGER__TRACE("Timeout, the queue is empty");
+ return make_unexpected(wait_result);
+ }
+ if (HAILO_SUCCESS != wait_result) {
+ LOGGER__WARNING("m_items_enqueued_sema received an unexpected failure");
+ return make_unexpected(wait_result);
+ }
+
+ // The queue isn't empty
+ T result{};
+ const bool success = m_inner.try_dequeue(result);
+ assert(success);
+ AE_UNUSED(success);
+
+ {
+ std::unique_lock<std::mutex> lock(m_callback_mutex);
+ if ((m_size == m_enqueues_count) && m_can_enqueue_callback) {
+ m_can_enqueue_callback();
+ }
+ m_enqueues_count--;
+ }
+
+ const auto signal_result = m_items_dequeued_sema_or_shutdown.signal();
+ if (HAILO_SUCCESS != signal_result) {
+ return make_unexpected(signal_result);
+ }
+ return result;
+ }
+
+ Expected<T> dequeue() AE_NO_TSAN
+ {
+ return dequeue(m_default_timeout);
+ }
+
+ hailo_status enqueue(const T& result, std::chrono::milliseconds timeout) AE_NO_TSAN
+ {
+ const auto wait_result = m_items_dequeued_sema_or_shutdown.wait(timeout);
+ if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
+ LOGGER__TRACE("Shutdown event has been signaled");
+ return wait_result;
+ }
+ if (HAILO_TIMEOUT == wait_result) {
+ LOGGER__TRACE("Timeout, the queue is full");
+ return wait_result;
+ }
+ if (HAILO_SUCCESS != wait_result) {
+ LOGGER__WARNING("m_items_dequeued_sema received an unexpected failure");
+ return wait_result;
+ }
+
+ // The queue isn't full
+ const bool success = m_inner.try_enqueue(result);
+ assert(success);
+ AE_UNUSED(success);
+
+ {
+ std::unique_lock<std::mutex> lock(m_callback_mutex);
+ m_enqueues_count++;
+ if ((m_size == m_enqueues_count) && m_cant_enqueue_callback) {
+ m_cant_enqueue_callback();
+ }
+ }
+
+ return m_items_enqueued_sema_or_shutdown.signal();
+ }
+
+ inline hailo_status enqueue(const T& result) AE_NO_TSAN
+ {
+ return enqueue(result, m_default_timeout);
+ }
+
+ // TODO: Do away with two copies of this function? (SDK-16481)
+ hailo_status enqueue(T&& result, std::chrono::milliseconds timeout, bool ignore_shutdown_event = false) AE_NO_TSAN
+ {
+ hailo_status wait_result = HAILO_UNINITIALIZED;
+ if (ignore_shutdown_event) {
+ wait_result = m_items_dequeued_sema->wait(timeout);
+ } else {
+ wait_result = m_items_dequeued_sema_or_shutdown.wait(timeout);
+ }
+
+ if (HAILO_SHUTDOWN_EVENT_SIGNALED == wait_result) {
+ LOGGER__TRACE("Shutdown event has been signaled");
+ return wait_result;
+ }
+ if (HAILO_TIMEOUT == wait_result) {
+ LOGGER__TRACE("Timeout, the queue is full");
+ return wait_result;
+ }
+ if (HAILO_SUCCESS != wait_result) {
+ LOGGER__WARNING("m_items_dequeued_sema received an unexpected failure");
+ return wait_result;
+ }
+
+ // The queue isn't full
+ const bool success = m_inner.try_enqueue(std::move(result));
+ assert(success);
+ AE_UNUSED(success);
+
+ {
+ std::unique_lock<std::mutex> lock(m_callback_mutex);
+ m_enqueues_count++;
+ if ((m_size == m_enqueues_count) && m_cant_enqueue_callback) {
+ m_cant_enqueue_callback();
+ }
+ }
+
+ return m_items_enqueued_sema_or_shutdown.signal();
+ }
+
+ // TODO: HRT-3810, remove hacky argument ignore_shutdown_event
+ inline hailo_status enqueue(T&& result, bool ignore_shutdown_event = false) AE_NO_TSAN
+ {
+ return enqueue(std::move(result), m_default_timeout, ignore_shutdown_event);
+ }
+
+ size_t size_approx()
+ {
+ return m_inner.size_approx();
+ }
+
+ hailo_status clear() AE_NO_TSAN
+ {
+ auto status = HAILO_SUCCESS;
+ while (HAILO_SUCCESS == status) {
+ auto output = dequeue(std::chrono::milliseconds(0), true);
+ status = output.status();
+ }
+
+ if (HAILO_TIMEOUT == status) {
+ return HAILO_SUCCESS;
+ }
+ return status;
+ }
+
+ void set_on_cant_enqueue_callback(std::function<void()> callback)
+ {
+ m_cant_enqueue_callback = callback;
+ }
+
+ void set_on_can_enqueue_callback(std::function<void()> callback)
+ {
+ m_can_enqueue_callback = callback;
+ }
+
+private:
+ ReaderWriterQueue m_inner;
+ WaitOrShutdown m_items_enqueued_sema_or_shutdown;
+ SemaphorePtr m_items_enqueued_sema;
+ WaitOrShutdown m_items_dequeued_sema_or_shutdown;
+ SemaphorePtr m_items_dequeued_sema;
+ std::chrono::milliseconds m_default_timeout;
+
+ const size_t m_size;
+ std::atomic_uint32_t m_enqueues_count;
+ std::function<void()> m_cant_enqueue_callback;
+ std::function<void()> m_can_enqueue_callback;
+ std::mutex m_callback_mutex;
+};
+
+} /* namespace hailort */
+
+#endif // HAILO_THREAD_SAFE_QUEUE_HPP_
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "hailo/hailort.h"
-#include "hailo/vdevice.hpp"
-#include "vdevice_internal.hpp"
-#include "pcie_device.hpp"
-#include "core_device.hpp"
-#include "hailort_defaults.hpp"
-#include "shared_resource_manager.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "context_switch/vdevice_network_group.hpp"
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#include "rpc_client_utils.hpp"
-#include "rpc/rpc_definitions.hpp"
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-namespace hailort
-{
-
-template<>
-std::string SharedResourceManager<std::string, VDeviceBase>::unique_key()
-{
- return HAILO_UNIQUE_VDEVICE_GROUP_ID;
-}
-
-static hailo_status validate_device_ids_match(const hailo_vdevice_params_t ¶ms,
- const std::set<std::string> &old_ids)
-{
- std::set<std::string> new_ids;
- for (uint32_t i = 0; i < params.device_count; i++) {
- // TODO: maybe needs to normalize domain?
- new_ids.insert(params.device_ids[i].id);
- }
-
- CHECK(old_ids == new_ids, HAILO_INVALID_OPERATION, "Different VDevice ids used by group_id {}", (nullptr == params.group_id ? "NULL" : params.group_id));
- return HAILO_SUCCESS;
-}
-
-hailo_status validate_same_vdevice(const hailo_vdevice_params_t ¶ms, const VDevice &vdevice)
-{
- // Validate device ids
- if (params.device_ids != nullptr) {
- auto old_ids = vdevice.get_physical_devices_ids();
- CHECK_EXPECTED_AS_STATUS(old_ids);
- std::set<std::string> old_ids_set(old_ids->begin(), old_ids->end());
-
- auto status = validate_device_ids_match(params, old_ids_set);
- CHECK_SUCCESS(status);
- }
-
- // Validate count matches
- auto physical_devices = vdevice.get_physical_devices();
- CHECK_EXPECTED_AS_STATUS(physical_devices);
- CHECK(params.device_count == physical_devices->size(), HAILO_INVALID_OPERATION,
- "Different VDevice device count used by group_id {}", params.group_id);
- return HAILO_SUCCESS;
-}
-
-void release_resource_if(bool condition, uint32_t key) {
- if (condition) {
- SharedResourceManager<std::string, VDeviceBase>::get_instance().release_resource(key);
- }
-}
-
-VDeviceHandle::VDeviceHandle(uint32_t handle) : m_handle(handle)
-{}
-
-VDeviceHandle::~VDeviceHandle()
-{
- SharedResourceManager<std::string, VDeviceBase>::get_instance().release_resource(m_handle);
-}
-
-Expected<std::unique_ptr<VDevice>> VDeviceHandle::create(const hailo_vdevice_params_t ¶ms)
-{
- CHECK_AS_EXPECTED((HAILO_SCHEDULING_ALGORITHM_NONE == params.scheduling_algorithm)
- || (1 == params.device_count) || (VDeviceBase::enable_multi_device_schedeulr()), HAILO_NOT_SUPPORTED,
- "Multiple devices scheduler feature is preview. To enable it, set env variable 'HAILO_ENABLE_MULTI_DEVICE_SCHEDULER' to 1");
-
- auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
- auto create = [¶ms]() {
- return VDeviceBase::create(params);
- };
- auto expected_handle = manager.register_resource(params.group_id == nullptr ? "" : std::string(params.group_id), create);
- CHECK_EXPECTED(expected_handle);
-
- auto expected_vdevice_base = manager.resource_lookup(expected_handle.value());
- CHECK_EXPECTED(expected_vdevice_base);
-
- auto same_vdevice_status = validate_same_vdevice(params, *expected_vdevice_base.value());
- release_resource_if(same_vdevice_status != HAILO_SUCCESS, expected_handle.value());
- CHECK_SUCCESS_AS_EXPECTED(same_vdevice_status);
-
- auto handle_vdevice = std::unique_ptr<VDeviceHandle>(new VDeviceHandle(expected_handle.value()));
- CHECK_AS_EXPECTED(handle_vdevice != nullptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<VDevice>(std::move(handle_vdevice));
-}
-
-Expected<ConfiguredNetworkGroupVector> VDeviceHandle::configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params)
-{
- auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
- auto vdevice = manager.resource_lookup(m_handle);
- CHECK_EXPECTED(vdevice);
-
- return vdevice.value()->configure(hef, configure_params);
-}
-
-Expected<std::vector<std::reference_wrapper<Device>>> VDeviceHandle::get_physical_devices() const
-{
- auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
- auto vdevice = manager.resource_lookup(m_handle);
- CHECK_EXPECTED(vdevice);
-
- return vdevice.value()->get_physical_devices();
-}
-
-Expected<std::vector<std::string>> VDeviceHandle::get_physical_devices_ids() const
-{
- auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
- auto vdevice = manager.resource_lookup(m_handle);
- CHECK_EXPECTED(vdevice);
-
- return vdevice.value()->get_physical_devices_ids();
-}
-
-Expected<hailo_stream_interface_t> VDeviceHandle::get_default_streams_interface() const
-{
- auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
- auto vdevice = manager.resource_lookup(m_handle);
- CHECK_EXPECTED(vdevice);
-
- return vdevice.value()->get_default_streams_interface();
-}
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-
-VDeviceClient::VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle)
- : m_client(std::move(client))
- , m_handle(handle)
-{}
-
-VDeviceClient::~VDeviceClient()
-{
- auto reply = m_client->VDevice_release(m_handle);
- if (reply != HAILO_SUCCESS) {
- LOGGER__CRITICAL("VDevice_release failed!");
- }
-}
-
-Expected<std::unique_ptr<VDevice>> VDeviceClient::create(const hailo_vdevice_params_t ¶ms)
-{
- grpc::ChannelArguments ch_args;
- ch_args.SetMaxReceiveMessageSize(-1);
- auto channel = grpc::CreateCustomChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials(), ch_args);
- CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
-
- auto client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
- auto init_status = HailoRtRpcClientUtils::get_instance().init_client_service_communication();
- CHECK_SUCCESS_AS_EXPECTED(init_status);
- auto reply = client->VDevice_create(params, getpid());
- CHECK_EXPECTED(reply);
-
- auto client_vdevice = std::unique_ptr<VDeviceClient>(new VDeviceClient(std::move(client), reply.value()));
- CHECK_AS_EXPECTED(client_vdevice != nullptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return std::unique_ptr<VDevice>(std::move(client_vdevice));
-}
-
-Expected<ConfiguredNetworkGroupVector> VDeviceClient::configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params)
-{
- auto networks_handles = m_client->VDevice_configure(m_handle, hef, getpid(), configure_params);
- CHECK_EXPECTED(networks_handles);
-
- ConfiguredNetworkGroupVector networks;
- networks.reserve(networks_handles->size());
- for (auto &handle : networks_handles.value()) {
- auto channel = grpc::CreateChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials());
- CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
-
- auto client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- networks.emplace_back(make_shared_nothrow<ConfiguredNetworkGroupClient>(std::move(client), handle));
- }
- return networks;
-}
-
-Expected<std::vector<std::reference_wrapper<Device>>> VDeviceClient::get_physical_devices() const
-{
- LOGGER__ERROR("ConfiguredNetworkGroup::get_physical_devices function is not supported when using multi-process service");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-Expected<std::vector<std::string>> VDeviceClient::get_physical_devices_ids() const
-{
- return m_client->VDevice_get_physical_devices_ids(m_handle);
-}
-
-Expected<hailo_stream_interface_t> VDeviceClient::get_default_streams_interface() const
-{
- return m_client->VDevice_get_default_streams_interface(m_handle);
-}
-
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-
-Expected<std::unique_ptr<VDevice>> VDevice::create(const hailo_vdevice_params_t ¶ms)
-{
- CHECK_AS_EXPECTED(0 != params.device_count, HAILO_INVALID_ARGUMENT,
- "VDevice creation failed. invalid device_count ({}).", params.device_count);
-
- std::unique_ptr<VDevice> vdevice;
- if (params.multi_process_service) {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
- auto expected_vdevice = VDeviceClient::create(params);
- CHECK_EXPECTED(expected_vdevice);
- vdevice = expected_vdevice.release();
-#else
- LOGGER__ERROR("multi_process_service requires service compilation with HAILO_BUILD_SERVICE");
- return make_unexpected(HAILO_INVALID_OPERATION);
-#endif // HAILO_SUPPORT_MULTI_PROCESS
- } else {
- auto expected_vdevice = VDeviceHandle::create(params);
- CHECK_EXPECTED(expected_vdevice);
- vdevice = expected_vdevice.release();
- }
- // Upcasting to VDevice unique_ptr
- auto vdevice_ptr = std::unique_ptr<VDevice>(vdevice.release());
- return vdevice_ptr;
-}
-
-Expected<std::unique_ptr<VDevice>> VDevice::create()
-{
- auto params = HailoRTDefaults::get_vdevice_params();
- return create(params);
-}
-
-Expected<std::unique_ptr<VDevice>> VDevice::create(const std::vector<std::string> &device_ids)
-{
- auto params = HailoRTDefaults::get_vdevice_params();
-
- auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
- CHECK_EXPECTED(device_ids_vector);
-
- params.device_ids = device_ids_vector->data();
- params.device_count = static_cast<uint32_t>(device_ids_vector->size());
-
- return create(params);
-}
-
-Expected<std::unique_ptr<VDeviceBase>> VDeviceBase::create(const hailo_vdevice_params_t ¶ms)
-{
- NetworkGroupSchedulerPtr scheduler_ptr;
- if (HAILO_SCHEDULING_ALGORITHM_NONE != params.scheduling_algorithm) {
- if (HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN == params.scheduling_algorithm) {
- auto network_group_scheduler = NetworkGroupScheduler::create_round_robin(params.device_count);
- CHECK_EXPECTED(network_group_scheduler);
- scheduler_ptr = network_group_scheduler.release();
- } else {
- LOGGER__ERROR("Unsupported scheduling algorithm");
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
- }
-
- auto devices_expected = create_devices(params);
- CHECK_EXPECTED(devices_expected);
- auto devices = devices_expected.release();
-
- std::string vdevice_ids = "VDevice Infos:";
- for (const auto &device : devices) {
- auto info_str = device->get_dev_id();
- vdevice_ids += " " + std::string(info_str);
- }
- LOGGER__INFO("{}", vdevice_ids);
-
- auto vdevice = std::unique_ptr<VDeviceBase>(new (std::nothrow) VDeviceBase(std::move(devices), scheduler_ptr));
- CHECK_AS_EXPECTED(nullptr != vdevice, HAILO_OUT_OF_HOST_MEMORY);
-
- return vdevice;
-}
-
-Expected<ConfiguredNetworkGroupVector> VDeviceBase::configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params)
-{
- std::unique_lock<std::mutex> lock(m_mutex);
- auto start_time = std::chrono::steady_clock::now();
-
- for (auto &device : m_devices) {
- auto status = device->check_hef_is_compatible(hef);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- auto local_config_params = configure_params;
- if (local_config_params.empty()) {
- // All stream iface should be the same
- auto stream_interface = m_devices[0]->get_default_streams_interface();
- CHECK_EXPECTED(stream_interface);
- auto config_params_exp = hef.create_configure_params(stream_interface.value());
- CHECK_EXPECTED(config_params_exp);
- local_config_params = config_params_exp.release();
- }
-
- /* Validate batch size is identical for all networks in case scheduler is enabled */
- if (m_network_group_scheduler) {
- uint16_t ref_batch_size = UINT16_MAX;
- for (const auto &ng_params_pair : local_config_params) {
- for (const auto &network_params_pair : ng_params_pair.second.network_params_by_name) {
- if (UINT16_MAX == ref_batch_size) {
- ref_batch_size = network_params_pair.second.batch_size;
- }
- CHECK_AS_EXPECTED(ref_batch_size == network_params_pair.second.batch_size, HAILO_INVALID_OPERATION,
- "When scheduler is enabled, all networks should have the same batch_size. configure_params contains {} and {}. "
- "To disable scheduler, set HAILO_SCHEDULING_ALGORITHM_NONE in VDevice creation.", ref_batch_size, network_params_pair.second.batch_size);
- }
- }
- }
-
- ConfiguredNetworkGroupVector added_network_groups;
- added_network_groups.reserve(configure_params.size());
-
- for (const auto &network_params_pair : local_config_params) {
- std::shared_ptr<VDeviceNetworkGroup> identical_ng = nullptr;
- if (m_network_group_scheduler && PipelineMultiplexer::should_use_multiplexer()) {
- for (auto &network_group : m_network_groups) {
- if ((network_group->equals(hef, network_params_pair.first)) && (1 == network_group->get_input_streams().size())) {
- // TODO (HRT-8634): Support multi-inputs NGs (multi networks)
- identical_ng = network_group;
- break;
- }
- }
- }
- std::shared_ptr<VDeviceNetworkGroup> vdevice_netwrok_group = nullptr;
- if (identical_ng) {
- auto vdevice_netwrok_group_exp = VDeviceNetworkGroup::duplicate(identical_ng);
- CHECK_EXPECTED(vdevice_netwrok_group_exp);
-
- vdevice_netwrok_group = vdevice_netwrok_group_exp.release();
-
- vdevice_netwrok_group->set_network_group_handle(identical_ng->network_group_handle());
- vdevice_netwrok_group->create_vdevice_streams_from_duplicate(identical_ng);
-
- } else {
- ConfiguredNetworkGroupVector network_group_bundle; // bundle of the same NGs for all devices
- network_group_bundle.reserve(m_devices.size());
-
- for (auto &device : m_devices) {
- auto ng_vector = device->configure(hef, { std::make_pair(network_params_pair.first, network_params_pair.second) });
- CHECK_EXPECTED(ng_vector);
-
- assert(1 == ng_vector->size());
- network_group_bundle.push_back(ng_vector.release()[0]);
- }
-
- auto vdevice_netwrok_group_exp = VDeviceNetworkGroup::create(network_group_bundle, m_network_group_scheduler);
- CHECK_EXPECTED(vdevice_netwrok_group_exp);
-
- vdevice_netwrok_group = vdevice_netwrok_group_exp.release();
-
- auto ng_handle = INVALID_NETWORK_GROUP_HANDLE;
- if (m_network_group_scheduler) {
- auto network_group_handle_exp = m_network_group_scheduler->add_network_group(vdevice_netwrok_group);
- CHECK_EXPECTED(network_group_handle_exp);
- ng_handle = network_group_handle_exp.release();
- }
- vdevice_netwrok_group->set_network_group_handle(ng_handle);
- auto status = vdevice_netwrok_group->create_vdevice_streams_from_config_params(make_shared_nothrow<PipelineMultiplexer>(), ng_handle);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- m_network_groups.push_back(vdevice_netwrok_group);
- }
-
- added_network_groups.push_back(vdevice_netwrok_group);
- }
-
- auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
- LOGGER__INFO("Configuring HEF on VDevice took {} milliseconds", elapsed_time_ms);
-
- return added_network_groups;
-}
-
-Expected<hailo_stream_interface_t> VDeviceBase::get_default_streams_interface() const
-{
- auto stream_interface = m_devices[0]->get_default_streams_interface();
- CHECK_EXPECTED(stream_interface);
- for (auto &dev : m_devices) {
- auto current_stream_interface = dev->get_default_streams_interface();
- CHECK_EXPECTED(current_stream_interface);
- CHECK_AS_EXPECTED(*current_stream_interface == *stream_interface, HAILO_INTERNAL_FAILURE,
- "vDevice is supported only with homogeneous device type");
- }
- return stream_interface.release();
-}
-
-Expected<std::vector<std::unique_ptr<VdmaDevice>>> VDeviceBase::create_devices(const hailo_vdevice_params_t ¶ms)
-{
- std::vector<std::unique_ptr<VdmaDevice>> devices;
- devices.reserve(params.device_count);
-
- const bool user_specific_devices = (params.device_ids != nullptr);
-
- auto device_ids = get_device_ids(params);
- CHECK_EXPECTED(device_ids);
-
- for (const auto &device_id : device_ids.value()) {
- if (devices.size() == params.device_count) {
- break;
- }
- auto device = VdmaDevice::create(device_id);
- CHECK_EXPECTED(device);
-
- // Validate That if (device_count != 1), device arch is not H8L. May be changed in SDK-28729
- if (1 != params.device_count) {
- auto device_arch = device.value()->get_architecture();
- CHECK_EXPECTED(device_arch);
- CHECK_AS_EXPECTED(HAILO_ARCH_HAILO8L != device_arch.value(), HAILO_INVALID_OPERATION,
- "VDevice with multiple devices is not supported on HAILO_ARCH_HAILO8L. device {} is HAILO_ARCH_HAILO8L", device_id);
- }
-
- auto status = device.value()->mark_as_used();
- if (!user_specific_devices && (HAILO_DEVICE_IN_USE == status)) {
- // Continue only if the user didn't ask for specific devices
- continue;
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
- devices.emplace_back(device.release());
- }
- CHECK_AS_EXPECTED(params.device_count == devices.size(), HAILO_OUT_OF_PHYSICAL_DEVICES,
- "Failed to create vdevice. there are not enough free devices. requested: {}, found: {}",
- params.device_count, devices.size());
-
- return devices;
-}
-
-Expected<std::vector<std::string>> VDeviceBase::get_device_ids(const hailo_vdevice_params_t ¶ms)
-{
- if (params.device_ids == nullptr) {
- // Use device scan pool
- return Device::scan();
- }
- else {
- std::vector<std::string> device_ids;
- device_ids.reserve(params.device_count);
-
- for (size_t i = 0; i < params.device_count; i++) {
- device_ids.emplace_back(params.device_ids[i].id);
- }
-
- return device_ids;
- }
-}
-
-
-} /* namespace hailort */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+set(SRC_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/vdevice.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_core_op.cpp
+
+ ${CMAKE_CURRENT_SOURCE_DIR}/pipeline_multiplexer.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_stream.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_stream_multiplexer_wrapper.cpp
+
+ ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/network_group_scheduler.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/scheduler_oracle.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/scheduled_core_op_state.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/multi_device_scheduled_stream.cpp
+)
+
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pipeline_multiplexer.cpp
+ * @brief Pipeline Multiplexer
+ **/
+
+#include "hailo/hailort_common.hpp"
+#include "hailo/vstream.hpp"
+
+#include "common/utils.hpp"
+
+#include "vdevice/pipeline_multiplexer.hpp"
+
+
+namespace hailort
+{
+
+PipelineMultiplexer::PipelineMultiplexer() :
+ m_should_core_op_stop(),
+ m_input_streams_count(0),
+ m_output_streams_count(0),
+ m_next_to_write(0),
+ m_order_queue(),
+ m_currently_writing(INVALID_CORE_OP_HANDLE),
+ m_written_streams_count(0),
+ m_read_streams_count(0),
+ m_next_to_read_after_drain(INVALID_CORE_OP_HANDLE)
+{}
+
+bool PipelineMultiplexer::should_use_multiplexer()
+{
+ auto disable_multiplexer_env = std::getenv(DISABLE_MULTIPLEXER_ENV_VAR);
+ if ((nullptr != disable_multiplexer_env) && (strnlen(disable_multiplexer_env, 2) == 1) && (strncmp(disable_multiplexer_env, "1", 1) == 0)) {
+ LOGGER__WARNING("Usage of '{}' env variable is deprecated.", DISABLE_MULTIPLEXER_ENV_VAR);
+ return false;
+ }
+ return true;
+}
+
+hailo_status PipelineMultiplexer::add_core_op_instance(multiplexer_core_op_handle_t core_op_handle, CoreOp &core_op)
+{
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ std::unique_lock<std::mutex> read_lock(m_reading_mutex);
+ assert(!contains(m_should_core_op_stop, core_op_handle));
+
+ auto is_first_instance = (0 == instances_count());
+
+ auto stream_infos = core_op.get_all_stream_infos();
+ CHECK_EXPECTED_AS_STATUS(stream_infos);
+
+ for (const auto &stream_info : stream_infos.value()) {
+ m_should_core_op_stop[core_op_handle][stream_info.name] = false;
+ if (is_first_instance) {
+ // To be filled only on first instance
+ if (HAILO_H2D_STREAM == stream_info.direction) {
+ m_input_streams_count++;
+ } else {
+ m_output_streams_count++;
+ m_is_stream_reading[stream_info.name] = false;
+ }
+ }
+ }
+
+ m_write_barriers[core_op_handle] = make_shared_nothrow<Barrier>(m_input_streams_count);
+ CHECK(nullptr != m_write_barriers[core_op_handle], HAILO_OUT_OF_HOST_MEMORY);
+ m_is_waiting_to_write[core_op_handle] = false;
+
+ return HAILO_SUCCESS;
+}
+
+void PipelineMultiplexer::set_output_vstreams_names(multiplexer_core_op_handle_t core_op_handle, const std::vector<OutputVStream> &output_vstreams)
+{
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ for (const auto &output_vstream : output_vstreams) {
+ m_can_output_vstream_read[core_op_handle][output_vstream.name()] = true;
+ }
+ m_can_core_op_read[core_op_handle] = true;
+}
+
+bool PipelineMultiplexer::has_more_than_one_core_op_instance() const
+{
+ return instances_count() > 1;
+}
+
+size_t PipelineMultiplexer::instances_count() const
+{
+ return m_should_core_op_stop.size();
+}
+
+bool PipelineMultiplexer::should_core_op_stop(multiplexer_core_op_handle_t core_op_handle)
+{
+ for (const auto &name_flag_pair : m_should_core_op_stop[core_op_handle]) {
+ if (name_flag_pair.second) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+hailo_status PipelineMultiplexer::wait_for_write(multiplexer_core_op_handle_t core_op_handle)
+{
+ std::shared_ptr<hailort::Barrier> barrier;
+ {
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ assert(contains(m_write_barriers, core_op_handle));
+ barrier = m_write_barriers[core_op_handle];
+ }
+ // TODO: This has no timeout
+ // TODO: HRT-8634
+ barrier->arrive_and_wait();
+ {
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ assert(contains(m_should_core_op_stop, core_op_handle));
+ assert(contains(m_is_waiting_to_write, core_op_handle));
+
+ m_is_waiting_to_write[core_op_handle] = true;
+ hailo_status status = HAILO_SUCCESS;
+ m_writing_cv.wait(lock, [this, core_op_handle, &status] {
+ if (!has_more_than_one_core_op_instance() || !should_use_multiplexer()) {
+ return true;
+ }
+
+ if (should_core_op_stop(core_op_handle)) {
+ status = HAILO_STREAM_ABORTED_BY_USER;
+ return true; // return true so that the wait will finish
+ }
+
+ if (m_currently_writing == core_op_handle) {
+ return true;
+ }
+
+ if (!can_core_op_read(core_op_handle)) {
+ return false;
+ }
+
+ if (INVALID_CORE_OP_HANDLE == m_currently_writing) {
+ if ((m_next_to_write != core_op_handle) && m_is_waiting_to_write[m_next_to_write] && can_core_op_read(m_next_to_write)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+ });
+ m_is_waiting_to_write[core_op_handle] = false;
+
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ return status;
+ }
+ CHECK_SUCCESS(status);
+
+ if (INVALID_CORE_OP_HANDLE == m_currently_writing) {
+ m_currently_writing = core_op_handle;
+ m_next_to_write = m_currently_writing;
+ }
+ }
+ m_writing_cv.notify_all();
+
+ return HAILO_SUCCESS;
+}
+
+bool PipelineMultiplexer::can_core_op_read(multiplexer_core_op_handle_t core_op_handle)
+{
+ if (should_core_op_stop(core_op_handle)) {
+ return false;
+ }
+
+ if (!contains(m_can_core_op_read, core_op_handle)) {
+ return true;
+ }
+
+ return m_can_core_op_read[core_op_handle];
+}
+
+hailo_status PipelineMultiplexer::signal_write_finish(multiplexer_core_op_handle_t core_op_handle, bool did_write_fail)
+{
+ {
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ m_written_streams_count++;
+ if (m_written_streams_count == m_input_streams_count) {
+ m_written_streams_count = 0;
+ m_currently_writing = INVALID_CORE_OP_HANDLE;
+ m_next_to_write++;
+ m_next_to_write %= static_cast<uint32_t>(instances_count());
+
+ if (!did_write_fail) {
+ std::unique_lock<std::mutex> reading_lock(m_reading_mutex);
+ m_order_queue.push_back(core_op_handle);
+ }
+ m_reading_cv.notify_all();
+ }
+ }
+
+ m_writing_cv.notify_all();
+ return HAILO_SUCCESS;
+}
+
+Expected<uint32_t> PipelineMultiplexer::wait_for_read(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name,
+ const std::chrono::milliseconds &timeout)
+{
+ uint32_t drain_frames = 0;
+
+ {
+ std::unique_lock<std::mutex> lock(m_reading_mutex);
+
+ assert(contains(m_should_core_op_stop, core_op_handle));
+ assert(contains(m_is_stream_reading, stream_name));
+
+ hailo_status status = HAILO_SUCCESS;
+ auto wait_res = m_reading_cv.wait_for(lock, timeout, [this, core_op_handle, stream_name, &drain_frames, &status] {
+ if (should_core_op_stop(core_op_handle)) {
+ status = HAILO_STREAM_ABORTED_BY_USER;
+ return true; // return true so that the wait will finish
+ }
+ if (m_is_stream_reading[stream_name]) {
+ return false;
+ }
+
+ if (m_next_to_read_after_drain == core_op_handle) {
+ drain_frames = m_num_frames_to_drain[stream_name];
+ return true;
+ }
+
+ if (m_order_queue.empty()) {
+ return false;
+ }
+
+ if (m_order_queue.front() != core_op_handle) {
+ if (!should_core_op_stop(m_order_queue.front())) {
+ return false;
+ }
+
+ // This means the NG that is currently writing was aborted so we have to wait for it to finish processing its frames
+ if ((INVALID_CORE_OP_HANDLE != m_currently_writing) && (m_currently_writing != core_op_handle)) {
+ return false;
+ }
+
+ uint32_t max_drain_count = get_frame_count_to_drain(core_op_handle);
+ if (0 == max_drain_count) {
+ return false;
+ }
+
+ drain_frames = drain_aborted_in_order_queue(core_op_handle, stream_name, max_drain_count);
+ }
+
+ return true;
+ });
+ CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "{} (D2H) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ return make_unexpected(status);
+ }
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ m_is_stream_reading[stream_name] = true;
+ }
+
+ m_reading_cv.notify_all();
+ return drain_frames;
+}
+
+uint32_t PipelineMultiplexer::get_frame_count_to_drain(multiplexer_core_op_handle_t core_op_handle)
+{
+ uint32_t drain_count = 0;
+ for (const auto &handle : m_order_queue) {
+ if (!should_core_op_stop(handle)) {
+ if (handle == core_op_handle) {
+ // Current instance is in the front after draining
+ break;
+ } else {
+ // Someone else should drain these frames, the current instance won't be in front after draining
+ return 0;
+ }
+ }
+
+ drain_count++;
+ }
+
+ return drain_count;
+}
+
+uint32_t PipelineMultiplexer::drain_aborted_in_order_queue(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name,
+ uint32_t max_drain_count)
+{
+    // In case of multiple outputs where one or more already read the frame, we need to drain one less frame
+ for (auto &name_flag_pair : m_is_stream_reading) {
+ if (name_flag_pair.second) {
+ m_num_frames_to_drain[name_flag_pair.first] = max_drain_count - 1;
+ } else {
+ m_num_frames_to_drain[name_flag_pair.first] = max_drain_count;
+ }
+ }
+
+ m_next_to_read_after_drain = core_op_handle;
+ m_read_streams_count = 0;
+ for (uint32_t i = 0; i < max_drain_count; i++) {
+ for (auto &name_flag_pair : m_is_stream_reading) {
+ name_flag_pair.second = false;
+ }
+ m_order_queue.pop_front();
+ }
+
+ return m_num_frames_to_drain[stream_name];
+}
+
+// Marks one output stream as having finished reading the current frame. Once every output stream
+// of the frame has read it (m_read_streams_count reaches m_output_streams_count), the frame is
+// retired from m_order_queue, the per-stream reading flags are reset, and all readers waiting on
+// m_reading_cv are woken so the next frame's owner can proceed.
+hailo_status PipelineMultiplexer::signal_read_finish()
+{
+ std::unique_lock<std::mutex> lock(m_reading_mutex);
+
+ m_read_streams_count++;
+ if (m_read_streams_count == m_output_streams_count) {
+ m_read_streams_count = 0;
+ m_order_queue.pop_front();
+ for (auto &name_flag_pair : m_is_stream_reading) {
+ name_flag_pair.second = false;
+ }
+
+ // A fully-read frame ends any pending post-drain hand-off
+ m_next_to_read_after_drain = INVALID_CORE_OP_HANDLE;
+
+ // Unlock before notifying so woken readers don't immediately block on the mutex
+ lock.unlock();
+ m_reading_cv.notify_all();
+ }
+
+ return HAILO_SUCCESS;
+}
+
+// Clears the stop flag for (core_op_handle, stream_name), allowing writers/readers of that stream
+// to make progress again. No-op if the stream is already enabled. Takes both the writing and
+// reading mutexes (in that order, matching disable_stream) and wakes all waiters afterwards.
+hailo_status PipelineMultiplexer::enable_stream(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name)
+{
+ {
+ std::unique_lock<std::mutex> write_lock(m_writing_mutex);
+ std::unique_lock<std::mutex> read_lock(m_reading_mutex);
+ assert(contains(m_should_core_op_stop, core_op_handle));
+ assert(contains(m_should_core_op_stop[core_op_handle], stream_name));
+
+ if (!m_should_core_op_stop[core_op_handle][stream_name]) {
+ return HAILO_SUCCESS;
+ }
+
+ m_should_core_op_stop[core_op_handle][stream_name] = false;
+
+ // TODO: should we 'enable' barrier?
+ }
+
+ // Notify outside the locks so woken threads can acquire them immediately
+ m_writing_cv.notify_all();
+ m_reading_cv.notify_all();
+
+ return HAILO_SUCCESS;
+}
+
+// Sets the stop flag for (core_op_handle, stream_name) and terminates the core-op's write barrier
+// so any writer blocked on it is released. No-op if the stream is already disabled. Lock order
+// (writing then reading mutex) matches enable_stream.
+hailo_status PipelineMultiplexer::disable_stream(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name)
+{
+ {
+ std::unique_lock<std::mutex> write_lock(m_writing_mutex);
+ std::unique_lock<std::mutex> read_lock(m_reading_mutex);
+ assert(contains(m_should_core_op_stop, core_op_handle));
+ assert(contains(m_should_core_op_stop[core_op_handle], stream_name));
+
+ if (m_should_core_op_stop[core_op_handle][stream_name]) {
+ return HAILO_SUCCESS;
+ }
+
+ m_should_core_op_stop[core_op_handle][stream_name] = true;
+
+ // Release any writer currently blocked on this core-op's barrier
+ assert(contains(m_write_barriers, core_op_handle));
+ m_write_barriers[core_op_handle]->terminate();
+ }
+
+ m_writing_cv.notify_all();
+ m_reading_cv.notify_all();
+
+ return HAILO_SUCCESS;
+}
+
+// Registers another core-op instance on this run-once callback. The new instance's handle is
+// simply the next sequential index, and its pending-calls counter starts at zero.
+void PipelineMultiplexer::RunOnceForStream::add_instance()
+{
+ std::lock_guard<std::mutex> lock(m_mutex);
+ const auto new_instance_handle = static_cast<uint32_t>(m_calls_count.size());
+ m_calls_count[new_instance_handle] = 0;
+}
+
+// Installs the callback that run() fires once every registered instance has called in.
+// Guarded by the same mutex as run() / add_instance().
+void PipelineMultiplexer::RunOnceForStream::set_callback(std::function<hailo_status()> callback)
+{
+ std::lock_guard<std::mutex> lock(m_mutex);
+ m_callback = std::move(callback);
+}
+
+// Barrier-like "run once for all instances": each instance's call increments its own counter, and
+// the shared callback fires only when every registered instance has at least one pending call.
+// When it fires, one pending call is consumed from each instance, so a full round is required
+// before the callback can fire again.
+hailo_status PipelineMultiplexer::RunOnceForStream::run(multiplexer_core_op_handle_t core_op_handle)
+{
+ std::unique_lock<std::mutex> lock(m_mutex);
+ assert(contains(m_calls_count, core_op_handle));
+
+ m_calls_count[core_op_handle]++;
+ // If any instance has not called in yet, defer - the last caller will trigger the callback
+ for (auto &handle_flag_pair : m_calls_count) {
+ if (0 == handle_flag_pair.second) {
+ return HAILO_SUCCESS;
+ }
+ }
+
+ // All instances have a pending call - consume one from each and fire the callback once
+ for (auto &handle_flag_pair : m_calls_count) {
+ handle_flag_pair.second--;
+ }
+
+ return m_callback();
+}
+
+// Registers (or attaches another instance to) a run-once callback for the (stream_name, handle)
+// pair. The first registration creates the RunOnceForStream entry and installs 'callback'; every
+// registration (including the first) adds one instance to it.
+// Fix: key the creation on the entry being null rather than on key-presence. Previously, if
+// make_shared_nothrow() failed, CHECK returned early but operator[] had already inserted a null
+// entry; the next registration would then find the key present, skip creation, and dereference
+// the null pointer in add_instance(). Hoisting the entry reference also avoids four redundant
+// map lookups.
+hailo_status PipelineMultiplexer::register_run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t handle,
+ std::function<hailo_status()> callback)
+{
+ std::unique_lock<std::mutex> lock(m_register_run_once_mutex);
+ auto &run_once = m_run_once_db[stream_name][handle];
+ if (nullptr == run_once) {
+ run_once = make_shared_nothrow<RunOnceForStream>();
+ CHECK(nullptr != run_once, HAILO_OUT_OF_HOST_MEMORY);
+
+ run_once->set_callback(callback);
+ }
+
+ run_once->add_instance();
+
+ return HAILO_SUCCESS;
+}
+
+// Forwards to the RunOnceForStream registered for (stream_name, run_once_handle).
+// NOTE(review): no lock is taken here and operator[] inserts a null entry if the pair was never
+// registered, which would dereference null - presumably callers always call
+// register_run_once_for_stream() first; confirm against callers.
+hailo_status PipelineMultiplexer::run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t run_once_handle,
+ multiplexer_core_op_handle_t core_op_handle)
+{
+ return m_run_once_db[stream_name][run_once_handle]->run(core_op_handle);
+}
+
+// Records whether a single output vstream of 'core_op_handle' may currently be read, and derives
+// the core-op-level flag: the core-op is readable only when ALL of its output vstreams are.
+void PipelineMultiplexer::set_can_output_vstream_read(multiplexer_core_op_handle_t core_op_handle, const std::string &vstream_name, bool can_read)
+{
+ {
+ std::unique_lock<std::mutex> lock(m_writing_mutex);
+ assert(contains(m_can_output_vstream_read, core_op_handle));
+ assert(contains(m_can_output_vstream_read[core_op_handle], vstream_name));
+ assert(contains(m_can_core_op_read, core_op_handle));
+
+ m_can_output_vstream_read[core_op_handle][vstream_name] = can_read;
+
+ // Only recompute the aggregate when this update could change it
+ if (can_read != m_can_core_op_read[core_op_handle]) {
+ m_can_core_op_read[core_op_handle] = true;
+ for (const auto &name_bool_pair : m_can_output_vstream_read[core_op_handle]) {
+ if (!name_bool_pair.second) {
+ m_can_core_op_read[core_op_handle] = false;
+ break;
+ }
+ }
+ }
+ }
+ m_writing_cv.notify_all();
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pipeline_multiplexer.hpp
+ * @brief The pipeline multiplexer is a synchronization mechanism that allows communication
+ * between different pipelines that use the same low-level streams.
+ **/
+
+#ifndef _HAILO_PIPELINE_MULTIPLEXER_HPP_
+#define _HAILO_PIPELINE_MULTIPLEXER_HPP_
+
+#include "hailo/event.hpp"
+
+#include "common/barrier.hpp"
+
+#include "vdevice/scheduler/network_group_scheduler.hpp"
+
+#include <mutex>
+#include <queue>
+
+
+namespace hailort
+{
+
+#define DISABLE_MULTIPLEXER_ENV_VAR "HAILO_DISABLE_MULTIPLEXER"
+
+using multiplexer_core_op_handle_t = uint32_t;
+using run_once_for_stream_handle_t = uint32_t;
+
+// Synchronization hub that lets several pipeline instances (core-op instances) share the same
+// low-level streams. Writers take turns in m_order_queue order; readers consume frames in the
+// same order, draining frames that belong to aborted instances. Non-copyable, non-movable.
+class PipelineMultiplexer
+{
+public:
+ PipelineMultiplexer();
+
+ virtual ~PipelineMultiplexer() = default;
+ PipelineMultiplexer(const PipelineMultiplexer &other) = delete;
+ PipelineMultiplexer &operator=(const PipelineMultiplexer &other) = delete;
+ PipelineMultiplexer &operator=(PipelineMultiplexer &&other) = delete;
+ PipelineMultiplexer(PipelineMultiplexer &&other) = delete;
+
+ hailo_status add_core_op_instance(multiplexer_core_op_handle_t core_op_handle, CoreOp &core_op);
+ void set_output_vstreams_names(multiplexer_core_op_handle_t core_op_handle, const std::vector<OutputVStream> &output_vstreams);
+ bool has_more_than_one_core_op_instance() const;
+ size_t instances_count() const;
+ // Write-side ordering: block until this instance may write, then report completion
+ hailo_status wait_for_write(multiplexer_core_op_handle_t core_op_handle);
+ hailo_status signal_write_finish(multiplexer_core_op_handle_t core_op_handle, bool did_write_fail);
+ // Read-side ordering: returns the number of frames the caller must drain before its own frame
+ Expected<uint32_t> wait_for_read(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name,
+ const std::chrono::milliseconds &timeout);
+ hailo_status signal_read_finish();
+ hailo_status enable_stream(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name);
+ hailo_status disable_stream(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name);
+
+ hailo_status register_run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t handle, std::function<hailo_status()> callback);
+ hailo_status run_once_for_stream(const std::string &stream_name, run_once_for_stream_handle_t run_once_handle,
+ multiplexer_core_op_handle_t core_op_handle);
+
+ void set_can_output_vstream_read(multiplexer_core_op_handle_t core_op_handle, const std::string &vstream_name, bool can_read);
+
+ // Checks the DISABLE_MULTIPLEXER_ENV_VAR environment switch (see .cpp for exact policy)
+ static bool should_use_multiplexer();
+
+private:
+
+ bool should_core_op_stop(multiplexer_core_op_handle_t core_op_handle);
+
+ // Per (core-op, stream) abort flags; a core-op is "stopped" when any of its streams is flagged
+ std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_bool>> m_should_core_op_stop;
+ std::unordered_map<multiplexer_core_op_handle_t, std::atomic_bool> m_is_waiting_to_write;
+
+ uint32_t m_input_streams_count;
+ uint32_t m_output_streams_count;
+
+ // Write-side state, guarded by m_writing_mutex
+ multiplexer_core_op_handle_t m_next_to_write;
+ std::unordered_map<multiplexer_core_op_handle_t, std::shared_ptr<Barrier>> m_write_barriers;
+ // FIFO of core-op handles, one entry per written frame; defines read order
+ std::deque<multiplexer_core_op_handle_t> m_order_queue;
+ std::mutex m_writing_mutex;
+ std::condition_variable m_writing_cv;
+ multiplexer_core_op_handle_t m_currently_writing;
+ std::atomic_uint32_t m_written_streams_count;
+
+ // Read-side state, guarded by m_reading_mutex
+ std::unordered_map<std::string, std::atomic_bool> m_is_stream_reading;
+ std::mutex m_reading_mutex;
+ std::condition_variable m_reading_cv;
+ std::atomic_uint32_t m_read_streams_count;
+ std::unordered_map<std::string, std::atomic_uint32_t> m_num_frames_to_drain;
+ multiplexer_core_op_handle_t m_next_to_read_after_drain;
+
+ // Per-vstream readability and its per-core-op aggregate (see set_can_output_vstream_read)
+ std::unordered_map<multiplexer_core_op_handle_t, std::unordered_map<std::string, std::atomic_bool>> m_can_output_vstream_read;
+ std::unordered_map<multiplexer_core_op_handle_t, std::atomic_bool> m_can_core_op_read;
+
+ bool can_core_op_read(multiplexer_core_op_handle_t core_op_handle);
+ uint32_t get_frame_count_to_drain(multiplexer_core_op_handle_t core_op_handle);
+ uint32_t drain_aborted_in_order_queue(multiplexer_core_op_handle_t core_op_handle, const std::string &stream_name, uint32_t max_drain_count);
+
+ // Barrier-like helper: fires its callback once per "round" in which every registered
+ // instance has called run(). Only PipelineMultiplexer may drive it (friend below).
+ class RunOnceForStream final
+ {
+ public:
+ RunOnceForStream() {};
+
+ private:
+ void add_instance();
+ void set_callback(std::function<hailo_status()> callback);
+ hailo_status run(multiplexer_core_op_handle_t core_op_handle);
+
+ std::unordered_map<multiplexer_core_op_handle_t, std::atomic_uint32_t> m_calls_count;
+ std::function<hailo_status()> m_callback;
+ std::mutex m_mutex;
+
+ friend class PipelineMultiplexer;
+ };
+
+ // The run once map stores for each stream (by name), a map of RunOnceForStream which the user can register to.
+ // run_once_for_stream_handle_t is the handle which the user can access to his specific callback (for example, abort stream function).
+ // This is used for flushing, aborting and clear aborting streams.
+ std::unordered_map<std::string, std::unordered_map<run_once_for_stream_handle_t, std::shared_ptr<RunOnceForStream>>> m_run_once_db;
+ std::mutex m_register_run_once_mutex;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_PIPELINE_MULTIPLEXER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file multi_device_scheduled_stream.cpp
+ * @brief Implementation of the multi-device scheduled input stream: frames are staged in an
+ *        internal BuffersQueue and dispatched per device under the core-ops scheduler's control.
+ **/
+
+#include "vdevice/scheduler/multi_device_scheduled_stream.hpp"
+
+namespace hailort
+{
+
+// Takes the oldest frame from the shared queue and pushes it through the stream of
+// 'device_index': first write_buffer_only() to stage the data, then the underlying vDMA stream's
+// send_pending_buffer() to kick the transfer. HAILO_STREAM_ABORTED_BY_USER is propagated quietly
+// (logged at INFO) since it is an expected shutdown path.
+hailo_status MultiDeviceScheduledInputStream::send_pending_buffer(size_t device_index)
+{
+ auto buffer = m_queue->front(get_timeout()); // Counting on scheduler to not allow parallel calls to this function
+ if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
+ LOGGER__INFO("'front' was aborted.");
+ return buffer.status();
+ }
+ CHECK_EXPECTED_AS_STATUS(buffer);
+ auto status = m_streams[device_index].get().write_buffer_only(buffer.value());
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("send_pending_buffer was aborted.");
+ return status;
+ }
+ CHECK_SUCCESS(status);
+ m_queue->pop(); // Release buffer to free the queue for other dequeues
+
+ VdmaInputStream &vdma_input = static_cast<VdmaInputStream&>(m_streams[device_index].get());
+ return vdma_input.send_pending_buffer();
+}
+
+// Writes one frame into the internal queue, bracketed by the scheduler's write hand-shake:
+// wait_for_write() before the push, signal_write_finish() after it (passing whether the push
+// failed). Returns the number of bytes accepted on success.
+Expected<size_t> MultiDeviceScheduledInputStream::sync_write_raw_buffer(const MemoryView &buffer,
+ const std::function<bool()> &should_cancel)
+{
+ // The scheduler is held by weak_ptr; it may already be destroyed during teardown
+ auto core_ops_scheduler = m_core_ops_scheduler.lock();
+ CHECK_AS_EXPECTED(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+ auto status = core_ops_scheduler->wait_for_write(m_core_op_handle, name(), get_timeout(), should_cancel);
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("Write to stream was aborted.");
+ return make_unexpected(status);
+ }
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ status = m_queue->push(buffer, get_timeout());
+
+ // signal_write_finish() must run even when the push failed, so the scheduler's write
+ // accounting stays balanced; only afterwards is the push status examined
+ auto write_finish_status = core_ops_scheduler->signal_write_finish(m_core_op_handle, name(), status != HAILO_SUCCESS);
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("'push' was aborted.");
+ return make_unexpected(status);
+ }
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ if (HAILO_STREAM_ABORTED_BY_USER == write_finish_status) {
+ return make_unexpected(write_finish_status);
+ }
+ CHECK_SUCCESS_AS_EXPECTED(write_finish_status);
+
+ return buffer.size();
+}
+
+// Reports the number of frames currently queued (pending) for this stream.
+Expected<size_t> MultiDeviceScheduledInputStream::get_pending_frames_count() const
+{
+ const auto pending_count = get_queue_size();
+ return pending_count;
+}
+
+// Current number of frames held in the internal buffers queue.
+size_t MultiDeviceScheduledInputStream::get_queue_size() const
+{
+ return m_queue->size();
+}
+
+// Best-effort abort: aborts every underlying device stream, aborts the internal queue (waking any
+// blocked push/front), and disables the stream in the scheduler. The first failure encountered is
+// remembered and returned, but every step is still attempted.
+// Fix: the per-stream loop previously tested 'status' (still HAILO_SUCCESS) instead of
+// 'abort_status', so per-device abort failures were never logged nor propagated - compare with the
+// correct pattern in clear_abort() below.
+hailo_status MultiDeviceScheduledInputStream::abort()
+{
+ auto status = HAILO_SUCCESS; // Best effort
+ for (auto &stream : m_streams) {
+ auto abort_status = stream.get().abort();
+ if (HAILO_SUCCESS != abort_status) {
+ LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", abort_status, stream.get().get_dev_id());
+ status = abort_status;
+ }
+ }
+ m_queue->abort();
+
+ auto core_ops_scheduler = m_core_ops_scheduler.lock();
+ CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+ auto disable_status = core_ops_scheduler->disable_stream(m_core_op_handle, name());
+ if (HAILO_SUCCESS != disable_status) {
+ LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
+ status = disable_status;
+ }
+
+ return status;
+}
+
+// Best-effort reversal of abort(): clears the abort flag on every underlying device stream
+// (HAILO_STREAM_NOT_ACTIVATED is tolerated - the stream may simply not be active), un-aborts the
+// internal queue, and re-enables the stream in the scheduler. Remembers and returns the last
+// failure while still attempting every step.
+hailo_status MultiDeviceScheduledInputStream::clear_abort()
+{
+ auto status = HAILO_SUCCESS; // Best effort
+ for (auto &stream : m_streams) {
+ auto clear_abort_status = stream.get().clear_abort();
+ if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
+ LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
+ status = clear_abort_status;
+ }
+ }
+ m_queue->clear_abort();
+
+ auto core_ops_scheduler = m_core_ops_scheduler.lock();
+ CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+ auto enable_status = core_ops_scheduler->enable_stream(m_core_op_handle, name());
+ if (HAILO_SUCCESS != enable_status) {
+ LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
+ status = enable_status;
+ }
+
+ return status;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file multi_device_scheduled_stream.hpp
+ * @brief Internal multi device stream implementation for scheduled streams
+ *
+ **/
+
+#ifndef HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
+#define HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdevice/vdevice_internal.hpp"
+#include "vdevice/scheduler/scheduled_stream.hpp"
+#include "vdma/vdma_device.hpp"
+
+
+namespace hailort
+{
+
+// Fixed-capacity circular queue of pre-allocated frame buffers with blocking push/front and
+// abort support. Buffers are allocated once in create_unique(); push() copies frame data into
+// the slot at m_head, front()/pop() consume from m_tail.
+class BuffersQueue
+{
+public:
+ // Pre-allocates 'buffers_count' buffers of 'buffer_size' bytes and wraps them in a queue.
+ static Expected<std::unique_ptr<BuffersQueue>> create_unique(size_t buffer_size, size_t buffers_count)
+ {
+ std::vector<Buffer> queue;
+ queue.reserve(buffers_count);
+ for (size_t i = 0; i < (buffers_count); i++) {
+ auto buff = Buffer::create(buffer_size);
+ CHECK_EXPECTED(buff);
+ queue.emplace_back(buff.release());
+ }
+
+ auto ptr = make_unique_nothrow<BuffersQueue>(std::move(queue));
+ CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
+ return ptr;
+ }
+
+ // Copies 'buff' into the next free slot, blocking up to 'timeout' for space.
+ // Returns HAILO_STREAM_ABORTED_BY_USER if abort() is called while waiting.
+ hailo_status push(const MemoryView &buff, const std::chrono::milliseconds &timeout)
+ {
+ auto status = HAILO_SUCCESS;
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+
+ // TODO: this validation is done in scheduler logic. can be removed?
+ auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
+ if (m_should_stop) {
+ status = HAILO_STREAM_ABORTED_BY_USER;
+ return true;
+ }
+ return size() < m_queue.size();
+ });
+ CHECK(wait_res, HAILO_TIMEOUT, "Failed to enqueue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("'push' was aborted by user");
+ return status;
+ }
+
+ std::memcpy(m_queue[m_head].data(), buff.data(), buff.size());
+ m_head = static_cast<uint32_t>((m_head + 1) % m_queue.size());
+ m_is_empty = false;
+ }
+ m_cv.notify_all();
+
+ return HAILO_SUCCESS;
+ }
+
+ // Returns a view of the oldest frame without removing it, blocking up to 'timeout' for data.
+ // NOTE(review): the returned MemoryView is built from m_tail after the lock is released -
+ // presumably a single consumer is assumed (see the comment at the call site in
+ // send_pending_buffer); confirm no concurrent pop() can run.
+ Expected<MemoryView> front(const std::chrono::milliseconds &timeout)
+ {
+ auto status = HAILO_SUCCESS;
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+
+ auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
+ if (m_should_stop) {
+ status = HAILO_STREAM_ABORTED_BY_USER;
+ return true;
+ }
+ return 0 < size();
+ });
+ CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "Failed to dequeue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
+ if (HAILO_STREAM_ABORTED_BY_USER == status) {
+ LOGGER__INFO("'front' was aborted by user");
+ return make_unexpected(status);
+ }
+ }
+ m_cv.notify_all();
+
+ return MemoryView(m_queue[m_tail]);
+ }
+
+ // Releases the slot returned by front(), making it available for the next push().
+ void pop()
+ {
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_tail = static_cast<uint32_t>((m_tail + 1) % m_queue.size());
+ if (m_tail == m_head) {
+ m_is_empty = true;
+ }
+ }
+ m_cv.notify_all();
+ }
+
+ // Number of occupied slots; head==tail is disambiguated (empty vs full) via m_is_empty.
+ size_t size()
+ {
+ if (m_head == m_tail) {
+ return m_is_empty ? 0 : m_queue.size();
+ } else if (m_head > m_tail) {
+ return (m_head - m_tail);
+ } else {
+ return (m_queue.size() - m_tail) + m_head;
+ }
+ }
+
+ // Wakes all blocked push()/front() calls, making them return HAILO_STREAM_ABORTED_BY_USER.
+ void abort()
+ {
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_should_stop = true;
+ }
+ m_cv.notify_all();
+ }
+
+ // Re-arms the queue after abort(); subsequent push()/front() calls block normally again.
+ void clear_abort()
+ {
+ {
+ std::unique_lock<std::mutex> lock(m_mutex);
+ m_should_stop = false;
+ }
+ m_cv.notify_all();
+ }
+
+ // Public only so make_unique_nothrow can construct it; use create_unique() instead.
+ BuffersQueue(std::vector<Buffer> &&queue) : m_queue(std::move(queue)), m_head(0), m_tail(0),
+ m_is_empty(true), m_should_stop(false)
+ {}
+
+private:
+ std::vector<Buffer> m_queue;
+ std::atomic_uint32_t m_head;
+ std::atomic_uint32_t m_tail;
+
+ // Disambiguates the head==tail case in size(): true means empty, false means full
+ std::atomic_bool m_is_empty;
+
+ std::condition_variable m_cv;
+ std::mutex m_mutex;
+ std::atomic_bool m_should_stop;
+};
+
+// Scheduled input stream that feeds several physical devices: frames are staged in a shared
+// BuffersQueue by sync_write_raw_buffer() and dispatched per-device by send_pending_buffer().
+class MultiDeviceScheduledInputStream : public ScheduledInputStream {
+public:
+ // 'status' is an out-parameter reporting base-class construction success (no exceptions).
+ MultiDeviceScheduledInputStream(
+ std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
+ const scheduler_core_op_handle_t &core_op_handle,
+ EventPtr &&core_op_activated_event,
+ const LayerInfo &layer_info,
+ CoreOpsSchedulerWeakPtr core_ops_scheduler,
+ std::unique_ptr<BuffersQueue> &&frames_queue,
+ hailo_status &status) :
+ ScheduledInputStream(std::move(streams), core_op_handle,
+ std::move(core_op_activated_event), layer_info, core_ops_scheduler, status),
+ m_queue(std::move(frames_queue))
+ {}
+
+ virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
+ virtual Expected<size_t> get_pending_frames_count() const override;
+
+protected:
+ virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
+ const std::function<bool()> &should_cancel = []() { return false; }) override;
+ virtual hailo_status abort() override;
+ virtual hailo_status clear_abort() override;
+
+private:
+ size_t get_queue_size() const;
+
+ // Frames queue shared between the writer (sync_write_raw_buffer) and the scheduler-driven
+ // per-device dispatch (send_pending_buffer)
+ std::unique_ptr<BuffersQueue> m_queue;
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * TODO: Rename in a different PR
+ * @file network_group_scheduler.cpp
+ * @brief: Network scheduler
+ **/
+
+#include "common/os_utils.hpp"
+
+
+#include "vdevice/scheduler/network_group_scheduler.hpp"
+#include "vdevice/vdevice_core_op.hpp"
+#include "vdevice/scheduler/scheduler_oracle.hpp"
+#include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
+#include "hef/hef_internal.hpp"
+#include "utils/profiler/tracer_macros.hpp"
+
+#include <fstream>
+
+
+namespace hailort
+{
+
+#define SINGLE_CONTEXT_BATCH_SIZE (1)
+#define DEFAULT_BURST_SIZE (1)
+
+// TODO: use device handles instead device count
+// Constructs the scheduler and, when monitoring is enabled (SchedulerMon::should_monitor()),
+// starts the background monitor thread. A monitor start failure is logged but does not fail
+// construction - scheduling works without it.
+CoreOpsScheduler::CoreOpsScheduler(hailo_scheduling_algorithm_t algorithm, uint32_t device_count, std::vector<std::string> &devices_bdf_id,
+ std::vector<std::string> &devices_arch) :
+ SchedulerBase(algorithm, device_count, devices_bdf_id, devices_arch),
+ m_changing_current_batch_size(),
+ m_should_core_op_stop(),
+ m_before_read_write_mutex(),
+ m_core_ops_cvs(),
+ m_should_monitor(false)
+#if defined(__GNUC__)
+ , m_mon_tmp_output()
+#endif
+{
+ // TODO: HRT-7391 - Change scheduler monitor to work only when MON command is active
+ m_should_monitor = SchedulerMon::should_monitor();
+ if (m_should_monitor) {
+ auto status = start_mon();
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to initiate hailo monitor of networks, with status {}", status);
+ }
+ }
+}
+
+// Destructor: deactivates whichever core-op is still active on each device (switching to a null
+// core-op while resuming pending stream transfers), then shuts down the monitor thread. Failures
+// are logged only - a destructor has no way to propagate them.
+CoreOpsScheduler::~CoreOpsScheduler()
+{
+ for (auto device_info : m_devices) {
+ if (INVALID_CORE_OP_HANDLE != device_info->current_core_op_handle) {
+ auto current_core_op = m_scheduled_core_ops[device_info->current_core_op_handle]->get_core_op();
+ auto current_core_op_bundle = std::dynamic_pointer_cast<VDeviceCoreOp>(current_core_op);
+ assert(nullptr != current_core_op_bundle);
+ auto vdma_core_op = current_core_op_bundle->get_core_op_by_device_index(device_info->device_id);
+ if (!vdma_core_op) {
+ LOGGER__ERROR("Error retrieving core-op in scheduler destructor");
+ } else {
+ static const auto RESUME_PENDING_STREAM_TRANSFERS = true;
+ // Passing nullptr as the next core-op deactivates the current one
+ if (HAILO_SUCCESS != VdmaConfigManager::switch_core_op(vdma_core_op.value(), nullptr, 0,
+ RESUME_PENDING_STREAM_TRANSFERS)) {
+ LOGGER__ERROR("Error deactivating core-op when destroying scheduler");
+ }
+ }
+ }
+ }
+
+ if (m_should_monitor) {
+ // Clear the flag before signalling so the monitor loop exits on wake-up
+ m_should_monitor = false;
+ m_mon_shutdown_event->signal();
+ if (m_mon_thread.joinable()) {
+ m_mon_thread.join();
+ }
+ }
+}
+
+// Factory: builds a CoreOpsScheduler configured with the round-robin scheduling algorithm.
+Expected<CoreOpsSchedulerPtr> CoreOpsScheduler::create_round_robin(uint32_t device_count, std::vector<std::string> &devices_bdf_id, std::vector<std::string> &devices_arch)
+{
+ auto scheduler = make_shared_nothrow<CoreOpsScheduler>(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN, device_count, devices_bdf_id, devices_arch);
+ CHECK_AS_EXPECTED(nullptr != scheduler, HAILO_OUT_OF_HOST_MEMORY);
+
+ return scheduler;
+}
+
+// Current process id rendered as a decimal string; used as the monitor temp-file name.
+std::string get_curr_pid_as_str()
+{
+ const auto curr_pid = OsUtils::get_curr_pid();
+ return std::to_string(curr_pid);
+}
+
+// Starts the monitor: resets per-device accounting, opens the temp output file, and spawns a
+// thread that dumps scheduler state every DEFAULT_SCHEDULER_MON_INTERVAL until the shutdown
+// event is signalled. Only implemented for GCC-compatible builds (POSIX temp-file paths).
+hailo_status CoreOpsScheduler::start_mon()
+{
+#if defined(__GNUC__)
+ m_last_measured_timestamp = std::chrono::steady_clock::now();
+ m_mon_shutdown_event = Event::create_shared(Event::State::not_signalled);
+ CHECK(nullptr != m_mon_shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+ auto device_count = get_device_count();
+ for (uint32_t i = 0; i < device_count; i++) {
+ m_last_measured_utilization_timestamp[i] = {};
+ m_device_has_drained_everything[i] = true;
+ m_device_utilization[i] = 0;
+ }
+
+ auto tmp_file = open_temp_mon_file();
+ CHECK_EXPECTED_AS_STATUS(tmp_file);
+ m_mon_tmp_output = tmp_file.release();
+
+ m_mon_thread = std::thread([this] ()
+ {
+ while (m_should_monitor) {
+ // Timeout means "interval elapsed, nothing signalled" - dump and keep looping;
+ // success means the shutdown event fired and the while condition ends the loop
+ auto status = m_mon_shutdown_event->wait(DEFAULT_SCHEDULER_MON_INTERVAL);
+ if (HAILO_TIMEOUT == status) {
+ dump_state();
+ } else if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Scheduler monitor failed with status {}", status);
+ return;
+ }
+ }
+ return;
+ });
+
+ return HAILO_SUCCESS;
+#else
+ return HAILO_NOT_IMPLEMENTED;
+#endif
+}
+
+#if defined(__GNUC__)
+// Creates the monitor's output file in SCHEDULER_MON_TMP_DIR, named after the current pid so
+// multiple processes can be monitored side by side.
+Expected<std::shared_ptr<TempFile>> CoreOpsScheduler::open_temp_mon_file()
+{
+ std::string file_name = get_curr_pid_as_str();
+ auto tmp_file = TempFile::create(file_name, SCHEDULER_MON_TMP_DIR);
+ CHECK_EXPECTED(tmp_file);
+
+ auto tmp_file_ptr = make_shared_nothrow<TempFile>(tmp_file.release());
+ CHECK_AS_EXPECTED(nullptr != tmp_file_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return tmp_file_ptr;
+}
+
+// Serializes a ProtoMon snapshot (network, device and frame statistics) into the locked temp
+// file, then zeroes the utilization/fps accumulators so the next interval starts fresh.
+// Called periodically from the monitor thread.
+void CoreOpsScheduler::dump_state()
+{
+ // LockedFile guards against a concurrent reader (the mon CLI) seeing a partial write
+ auto file = LockedFile::create(m_mon_tmp_output->name(), "w");
+ if (HAILO_SUCCESS != file.status()) {
+ LOGGER__ERROR("Failed to open and lock file {}, with status: {}", m_mon_tmp_output->name(), file.status());
+ return;
+ }
+
+ ProtoMon mon;
+ mon.set_pid(get_curr_pid_as_str());
+ // Fold in utilization accrued since the last dump before reading the accumulators
+ time_dependent_events_cycle_calc();
+ log_monitor_networks_infos(mon);
+ log_monitor_device_infos(mon);
+ log_monitor_frames_infos(mon);
+
+ // Clear accumulators
+ for (auto &handle_core_op_utilization_pair : m_core_op_utilization) {
+ handle_core_op_utilization_pair.second = 0;
+ }
+ for (auto &handle_fps_pair : m_fps_accumulator) {
+ handle_fps_pair.second = 0;
+ }
+ for (auto &handle_device_utilization_pair: m_device_utilization) {
+ handle_device_utilization_pair.second = 0;
+ }
+
+ if (!mon.SerializeToFileDescriptor(file->get_fd())) {
+ LOGGER__ERROR("Failed to SerializeToFileDescriptor(), with errno: {}", errno);
+ }
+}
+#endif
+
+// Name of the scheduled core-op identified by 'core_op_handle' (handle must be valid).
+std::string CoreOpsScheduler::get_core_op_name(const scheduler_core_op_handle_t &core_op_handle)
+{
+ assert(m_scheduled_core_ops.size() > core_op_handle);
+ return m_scheduled_core_ops[core_op_handle]->get_core_op_name();
+}
+
+// TODO: HRT-9804 - Change monitor to use the tracer design mechanism (curently this functions uses private members)
+// Closes the current measurement interval: records its length in seconds
+// (m_last_measured_time_duration) and folds in-flight utilization of still-busy devices into the
+// accumulators, so the percentages computed by the log_monitor_* functions are consistent.
+void CoreOpsScheduler::time_dependent_events_cycle_calc()
+{
+ auto curr_time = std::chrono::steady_clock::now();
+ m_last_measured_time_duration = std::chrono::duration_cast<std::chrono::duration<double>>(curr_time - m_last_measured_timestamp).count();
+
+ for (auto device_info : m_devices) {
+ // A device that has not drained everything is still mid-inference; account for the
+ // partial utilization without marking the cycle finished (hence 'false')
+ if (!m_device_has_drained_everything[device_info->device_id]) {
+ update_utilization_read_buffers_finished(device_info->device_id, device_info->current_core_op_handle, false);
+ }
+ }
+
+ m_last_measured_timestamp = curr_time;
+}
+
+// Appends one ProtoMon device-info entry per managed device, reporting its utilization as a
+// percentage of the last measurement interval (m_last_measured_time_duration).
+// Fix: iterate by const reference instead of copying each device-info handle per iteration, and
+// correct the misspelled local 'utilization_precentage'.
+void CoreOpsScheduler::log_monitor_device_infos(ProtoMon &mon)
+{
+ for (const auto &device_info : m_devices) {
+ assert(contains(m_device_utilization, device_info->device_id));
+ auto curr_device_utilization = m_device_utilization[device_info->device_id];
+ auto utilization_percentage = ((curr_device_utilization * 100) / m_last_measured_time_duration);
+
+ auto device_infos = mon.add_device_infos();
+ device_infos->set_device_id(device_info->device_bdf_id);
+ device_infos->set_utilization(utilization_percentage);
+ device_infos->set_device_arch(device_info->device_arch);
+ }
+}
+
+// Appends one ProtoMon network-info entry per scheduled core-op: utilization as a percentage of
+// the last interval, and FPS derived from the frame accumulator normalized by the number of
+// output streams (each frame is counted once per output).
+void CoreOpsScheduler::log_monitor_networks_infos(ProtoMon &mon)
+{
+ for (uint32_t core_op_handle = 0; core_op_handle < m_core_op_utilization.size(); core_op_handle++) {
+ assert(contains(m_core_op_utilization, core_op_handle));
+ auto curr_core_op_utilization = m_core_op_utilization[core_op_handle];
+ auto utilization = ((curr_core_op_utilization * 100) / m_last_measured_time_duration);
+ // m_fps_accumulator counts one event per output stream; divide by outputs_count for frames
+ auto outputs_count = static_cast<uint32_t>(m_scheduled_core_ops[core_op_handle]->get_outputs_names().size());
+ auto fps = static_cast<double>((m_fps_accumulator[core_op_handle] / outputs_count) / m_last_measured_time_duration);
+
+ auto net_info = mon.add_networks_infos();
+ net_info->set_network_name(get_core_op_name(core_op_handle));
+ net_info->set_utilization(utilization);
+ net_info->set_fps(fps);
+ }
+}
+
+// Appends per-stream frame counters (buffer capacity and pending frames) for every input (H2D)
+// and output (D2H) stream of every scheduled core-op. A failure on one stream is logged and
+// skipped so the rest of the snapshot is still produced.
+void CoreOpsScheduler::log_monitor_frames_infos(ProtoMon &mon)
+{
+ for (uint32_t core_op_handle = 0; core_op_handle < m_scheduled_core_ops.size(); core_op_handle++) {
+ auto net_frames_info = mon.add_net_frames_infos();
+ net_frames_info->set_network_name(get_core_op_name(core_op_handle));
+
+ for (auto &stream_name : m_scheduled_core_ops[core_op_handle]->get_inputs_names()) {
+ auto stream_frames_info = net_frames_info->add_streams_frames_infos();
+ stream_frames_info->set_stream_name(stream_name);
+ stream_frames_info->set_stream_direction(PROTO__STREAM_DIRECTION__HOST_TO_DEVICE);
+ auto status = set_h2d_frames_counters(core_op_handle, stream_name, *stream_frames_info);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to set stream's {} frames count, status = {}", stream_name, status);
+ continue;
+ }
+ }
+
+ for (auto &stream_name : m_scheduled_core_ops[core_op_handle]->get_outputs_names()) {
+ auto stream_frames_info = net_frames_info->add_streams_frames_infos();
+ stream_frames_info->set_stream_name(stream_name);
+ stream_frames_info->set_stream_direction(PROTO__STREAM_DIRECTION__DEVICE_TO_HOST);
+ auto status = set_d2h_frames_counters(core_op_handle, stream_name, *stream_frames_info);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed to set stream's {} frames count, status = {}", stream_name, status);
+ continue;
+ }
+ }
+ }
+}
+
+// Fills the monitor record for one host-to-device (input) stream: its buffer capacity in frames
+// and its pending-frames count. Counters that cannot be queried are reported as
+// SCHEDULER_MON_NAN_VAL rather than failing the whole record.
+hailo_status CoreOpsScheduler::set_h2d_frames_counters(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+ ProtoMonStreamFramesInfo &stream_frames_info)
+{
+ assert(m_scheduled_core_ops.size() > core_op_handle);
+ auto current_cng = m_scheduled_core_ops[core_op_handle]->get_core_op();
+
+ auto input_stream = current_cng->get_input_stream_by_name(stream_name);
+ CHECK_EXPECTED_AS_STATUS(input_stream);
+
+ InputStreamBase &vdevice_input = static_cast<InputStreamBase&>(input_stream->get());
+ auto buffer_frames_size = vdevice_input.get_buffer_frames_size();
+ if (HAILO_SUCCESS == buffer_frames_size.status()) {
+ stream_frames_info.set_buffer_frames_size(static_cast<int32_t>(buffer_frames_size.value()));
+ } else {
+ stream_frames_info.set_buffer_frames_size(SCHEDULER_MON_NAN_VAL);
+ }
+
+ auto pending_frames_count = vdevice_input.get_pending_frames_count();
+ if (HAILO_SUCCESS == pending_frames_count.status()) {
+ stream_frames_info.set_pending_frames_count(static_cast<int32_t>(pending_frames_count.value()));
+ } else {
+ stream_frames_info.set_pending_frames_count(SCHEDULER_MON_NAN_VAL);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+// Fills the monitor record for one device-to-host (output) stream; mirrors
+// set_h2d_frames_counters() for the output direction, with the same SCHEDULER_MON_NAN_VAL
+// fallback for counters that cannot be queried.
+hailo_status CoreOpsScheduler::set_d2h_frames_counters(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+ ProtoMonStreamFramesInfo &stream_frames_info)
+{
+ assert(m_scheduled_core_ops.size() > core_op_handle);
+ auto current_cng = m_scheduled_core_ops[core_op_handle]->get_core_op();
+
+ auto output_stream = current_cng->get_output_stream_by_name(stream_name);
+ CHECK_EXPECTED_AS_STATUS(output_stream);
+
+ OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream->get());
+ auto buffer_frames_size = vdevice_output.get_buffer_frames_size();
+ if (HAILO_SUCCESS == buffer_frames_size.status()) {
+ stream_frames_info.set_buffer_frames_size(static_cast<int32_t>(buffer_frames_size.value()));
+ } else {
+ stream_frames_info.set_buffer_frames_size(SCHEDULER_MON_NAN_VAL);
+ }
+
+ auto pending_frames_count = vdevice_output.get_pending_frames_count();
+ if (HAILO_SUCCESS == pending_frames_count.status()) {
+ stream_frames_info.set_pending_frames_count(static_cast<int32_t>(pending_frames_count.value()));
+ } else {
+ stream_frames_info.set_pending_frames_count(SCHEDULER_MON_NAN_VAL);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+// Registers a core-op with the scheduler and returns its new handle (the next index in
+// m_scheduled_core_ops). Initializes all per-core-op bookkeeping: stop flags per stream,
+// per-device transfer counters, monitor accumulators, condition variables, and enrolls the
+// core-op at normal priority.
+Expected<scheduler_core_op_handle_t > CoreOpsScheduler::add_core_op(std::shared_ptr<CoreOp> added_cng)
+{
+ scheduler_core_op_handle_t core_op_handle = INVALID_CORE_OP_HANDLE;
+ {
+ std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+ // Handles are dense indices - the new core-op's handle is the current vector size
+ core_op_handle = static_cast<uint32_t>(m_scheduled_core_ops.size());
+ TRACE(AddCoreOpTrace, "", added_cng->name(), DEFAULT_SCHEDULER_TIMEOUT.count(), DEFAULT_SCHEDULER_MIN_THRESHOLD, core_op_handle);
+
+ auto stream_infos = added_cng->get_all_stream_infos();
+ CHECK_EXPECTED(stream_infos);
+
+ auto scheduled_core_op = ScheduledCoreOp::create(added_cng, stream_infos.value());
+ CHECK_EXPECTED(scheduled_core_op);
+
+ m_scheduled_core_ops.emplace_back(scheduled_core_op.release());
+
+ m_changing_current_batch_size[core_op_handle] = false;
+
+ for (const auto &stream_info : stream_infos.value()) {
+ m_should_core_op_stop[core_op_handle][stream_info.name] = false;
+ }
+
+ // Zero the per-device transfer counters for every stream, split by direction
+ for (auto& device_info : m_devices) {
+ for (const auto &stream_info : stream_infos.value()) {
+ if (HAILO_H2D_STREAM == stream_info.direction) {
+ device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle][stream_info.name] = 0;
+ } else {
+ device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle][stream_info.name] = 0;
+ device_info->current_cycle_finished_read_frames_d2h[core_op_handle][stream_info.name] = 0;
+ }
+ }
+ }
+
+ // Monitor members
+ m_core_op_utilization[core_op_handle] = 0;
+ m_fps_accumulator[core_op_handle] = 0;
+
+ auto network_cvs = ScheduledCoreOpCV::create(added_cng);
+ CHECK_EXPECTED(network_cvs);
+ m_core_ops_cvs[core_op_handle] = network_cvs.release();
+ m_core_op_priority[HAILO_SCHEDULER_PRIORITY_NORMAL].emplace_back(core_op_handle);
+ }
+
+ return core_op_handle;
+}
+
+// Returns true if the given core-op is currently active on any device.
+bool CoreOpsScheduler::is_core_op_active(const scheduler_core_op_handle_t &core_op_handle)
+{
+    for (auto device_info : m_devices) {
+        if (core_op_handle == device_info->current_core_op_handle) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Returns true when the scheduler manages more than one physical device.
+bool CoreOpsScheduler::is_multi_device()
+{
+    return m_devices.size() > 1;
+}
+
+// Blocks until the H2D stream `stream_name` of `core_op_handle` can accept another frame,
+// the caller cancels (via `should_cancel`), the stream is stopped, or `timeout` elapses.
+// On success the frame is accounted as "requested" for this stream.
+// Returns HAILO_STREAM_ABORTED_BY_USER on cancel/stop, HAILO_TIMEOUT on timeout.
+hailo_status CoreOpsScheduler::wait_for_write(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+    const std::chrono::milliseconds &timeout, const std::function<bool()> &should_cancel)
+{
+    {
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+        hailo_status status = HAILO_SUCCESS;
+        auto wait_res = m_core_ops_cvs[core_op_handle]->wait_for(stream_name, lock, timeout, [this, core_op_handle, stream_name, &should_cancel, &status] {
+
+            if (should_cancel()) {
+                status = HAILO_STREAM_ABORTED_BY_USER;
+                return true; // return true so that the wait will finish
+            }
+
+            if (should_core_op_stop(core_op_handle)) {
+                status = HAILO_STREAM_ABORTED_BY_USER;
+                return true; // return true so that the wait will finish
+            }
+
+            return m_scheduled_core_ops[core_op_handle]->can_stream_write(stream_name);
+        });
+        CHECK(wait_res, HAILO_TIMEOUT, "{} (H2D) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            return status;
+        }
+        CHECK_SUCCESS(status);
+
+        // Still under the lock: record that a frame was sent and bump the pending-write counter.
+        m_scheduled_core_ops[core_op_handle]->mark_frame_sent();
+        m_scheduled_core_ops[core_op_handle]->requested_write_frames().increase(stream_name);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Called after a user write completed (or failed). Moves the frame from "requested"
+// to "finished" accounting, and opportunistically activates the core-op on an available
+// device / streams pending buffers if conditions allow.
+// On write failure only the requested counter is rolled back.
+hailo_status CoreOpsScheduler::signal_write_finish(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+    bool did_write_fail)
+{
+    {
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+        assert(m_scheduled_core_ops.size() > core_op_handle);
+        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+        if (did_write_fail) {
+            // Undo the reservation made in wait_for_write; nothing was actually transferred.
+            scheduled_core_op->requested_write_frames().decrease(stream_name);
+            return HAILO_SUCCESS;
+        }
+
+        if (should_core_op_stop(core_op_handle)) {
+            return HAILO_STREAM_ABORTED_BY_USER;
+        }
+
+        scheduled_core_op->finished_write_frames().increase(stream_name);
+        scheduled_core_op->requested_write_frames().decrease(stream_name);
+
+        // If the oracle finds an idle device, switch this core-op onto it now.
+        auto device_id = CoreOpsSchedulerOracle::get_avail_device(*this, core_op_handle);
+        if (INVALID_DEVICE_ID != device_id) {
+            auto status = switch_core_op(core_op_handle, device_id);
+            if (HAILO_STREAM_ABORTED_BY_USER == status) {
+                LOGGER__INFO("switch_core_op has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+                return status;
+            }
+            CHECK_SUCCESS(status);
+        }
+
+        auto status = optimize_streaming_if_enabled(core_op_handle);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            return status;
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Activates `core_op_handle` on device `device_id`: resets the device's per-cycle frame
+// counters, computes the batch/burst size for this activation, performs the actual HW
+// switch (deactivating the previously active core-op if needed), registers D2H interrupt
+// callbacks for the newly active core-op, and finally pushes pending input buffers.
+// Caller is expected to hold m_before_read_write_mutex.
+// NOTE(review): `keep_nn_config` is accepted but unused - confirm this is intentional.
+hailo_status CoreOpsScheduler::switch_core_op(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id, bool /*keep_nn_config*/)
+{
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    auto curr_device_info = m_devices[device_id];
+
+    // initialize current cycle maps
+    for (const auto &name : scheduled_core_op->get_inputs_names()) {
+        curr_device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle][name] = 0;
+    }
+
+    for (const auto &name : scheduled_core_op->get_outputs_names()) {
+        curr_device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle][name] = 0;
+        curr_device_info->current_cycle_finished_read_frames_d2h[core_op_handle][name] = 0;
+    }
+
+    uint16_t batch_size = SINGLE_CONTEXT_BATCH_SIZE;
+    uint16_t burst_size = static_cast<uint16_t>(scheduled_core_op->finished_write_frames_min_value());
+    // In multi device, finished write frames could be bigger than the vdma buffers we have; can be removed after dynamic desc binding.
+    if (is_multi_device()) {
+        burst_size = std::min(burst_size, get_min_avail_buffers_count(core_op_handle, device_id));
+        // We limit the max burst size to (dev_count * max_batch) to keep former behavior (this was the buffer_pool size)
+        // TODO: remove this limitation and work with user-controlled max_burst_size
+        burst_size = std::min(burst_size, static_cast<uint16_t>(scheduled_core_op->get_max_batch_size() * get_device_count()));
+    }
+
+    if (scheduled_core_op->use_dynamic_batch_flow()) {
+        // Dynamic batch: the HW batch matches exactly what we are about to send.
+        batch_size = std::min(static_cast<uint16_t>(scheduled_core_op->finished_write_frames_min_value()), scheduled_core_op->get_max_batch_size());
+        burst_size = batch_size;
+    }
+
+    bool has_same_batch_size_as_previous = (curr_device_info->current_batch_size == batch_size);
+    curr_device_info->current_batch_size = batch_size;
+
+    if (curr_device_info->current_core_op_handle != core_op_handle) {
+        curr_device_info->is_switching_core_op = false;
+    }
+
+    // Re-activate the HW only if a different core-op is taking over, or the batch size changed.
+    if ((core_op_handle != curr_device_info->current_core_op_handle) || (!has_same_batch_size_as_previous)) {
+        assert(m_scheduled_core_ops.size() > core_op_handle);
+        auto next_active_cng = scheduled_core_op->get_core_op();
+        auto next_active_cng_wrapper = std::dynamic_pointer_cast<VDeviceCoreOp>(next_active_cng);
+        assert(nullptr != next_active_cng_wrapper);
+        auto next_active_cng_expected = next_active_cng_wrapper->get_core_op_by_device_index(curr_device_info->device_id);
+        CHECK_EXPECTED_AS_STATUS(next_active_cng_expected);
+
+        // Resolve the currently-active core-op on this device (if any) so it can be deactivated.
+        std::shared_ptr<VdmaConfigCoreOp> current_active_vdma_cng = nullptr;
+        if (curr_device_info->current_core_op_handle != INVALID_CORE_OP_HANDLE) {
+            auto current_active_cng = m_scheduled_core_ops[curr_device_info->current_core_op_handle]->get_core_op();
+            auto current_active_cng_bundle = std::dynamic_pointer_cast<VDeviceCoreOp>(current_active_cng);
+            assert(nullptr != current_active_cng_bundle);
+            auto current_active_cng_expected = current_active_cng_bundle->get_core_op_by_device_index(curr_device_info->device_id);
+            CHECK_EXPECTED_AS_STATUS(current_active_cng_expected);
+            current_active_vdma_cng = current_active_cng_expected.release();
+        }
+
+        TRACE(SwitchCoreOpTrace, "", core_op_handle);
+        static const auto RESUME_PENDING_STREAM_TRANSFERS = true;
+        auto status = VdmaConfigManager::switch_core_op(current_active_vdma_cng, next_active_cng_expected.value(), batch_size,
+            RESUME_PENDING_STREAM_TRANSFERS);
+        CHECK_SUCCESS(status, "Failed switching core-op");
+        // Clear the ready_to_switch flag from old activation
+        scheduled_core_op->mark_unready_to_switch();
+
+        // Register to get interrupts - has to be after core-op is activated
+        for (auto &output_stream : next_active_cng_expected.value()->get_output_streams()) {
+            OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream.get());
+            status = vdevice_output.register_interrupt_callback(
+                [this, name = output_stream.get().name(), format = vdevice_output.get_layer_info().format.order, scheduled_core_op, core_op_handle, device_id]
+                (uint32_t frames) {
+                    auto should_notify_next = false;
+                    {
+                        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+                        // In order to meet performance requirement we enable switch only after first frame has arrived.
+                        // TODO: remove this hack / move it to oracle and add another scheduling algorithm for it
+                        scheduled_core_op->mark_ready_to_switch();
+                        // NMS output frame counting is handled elsewhere (see signal_read_finish), so skip it here.
+                        if (hailo_format_order_t::HAILO_FORMAT_ORDER_HAILO_NMS != format) {
+                            TRACE(OutputVdmaEnqueueTrace, "", core_op_handle, name, frames);
+                            // TODO: Remove d2h_finished_transferred_frames and use current_cycle_finished_transferred_frames_d2h instead
+                            scheduled_core_op->d2h_finished_transferred_frames(name) += frames;
+                            m_devices[device_id]->current_cycle_finished_transferred_frames_d2h[core_op_handle][name] += frames;
+                        }
+
+                        auto has_drained_everything = has_core_op_drained_everything(core_op_handle, device_id);
+
+                        if (m_should_monitor) {
+                            update_utilization_read_buffers_finished(device_id, core_op_handle, has_drained_everything);
+                        }
+
+                        // If ng finished and we didn't choose next, let's choose without checking threshold
+                        if (!m_devices[device_id]->is_switching_core_op && has_drained_everything) {
+                            auto was_chosen = choose_next_core_op(device_id, true);
+                            if (!was_chosen) {
+                                choose_next_core_op(device_id, false);
+                            }
+                        }
+
+                        if (m_devices[device_id]->is_switching_core_op && has_drained_everything) {
+                            should_notify_next = true;
+                        }
+                    }
+                    // Notify stream that new frame was accepted (wait_for_read)
+                    m_core_ops_cvs[core_op_handle]->notify_one(name);
+                    if (should_notify_next) {
+                        auto next_core_op = m_devices[device_id]->next_core_op_handle;
+                        // Notify all the threads of the next ng (wait_for_read)
+                        m_core_ops_cvs[next_core_op]->notify_all();
+                    }
+                });
+            CHECK_SUCCESS(status);
+        }
+    }
+
+    scheduled_core_op->set_last_run_timestamp(std::chrono::steady_clock::now()); // Mark timestamp on activation
+    curr_device_info->current_core_op_handle = core_op_handle;
+
+    // Finished switching batch size
+    m_changing_current_batch_size[core_op_handle] = false;
+
+    auto status = send_all_pending_buffers(core_op_handle, device_id, burst_size);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Sends up to `burst_size` full frames (one pending buffer per input stream each
+// iteration) to device `device_id`. Stops early when some input stream has no
+// finished-write frames left. No-op if the core-op is not active on the device.
+hailo_status CoreOpsScheduler::send_all_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id, uint32_t burst_size)
+{
+    auto current_device_info = m_devices[device_id];
+    if ((INVALID_CORE_OP_HANDLE == current_device_info->current_core_op_handle) || (current_device_info->current_core_op_handle != core_op_handle)) {
+        return HAILO_SUCCESS;
+    }
+
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+    for (size_t i = 0; i < burst_size; i++) {
+        // A frame can only be sent if every input stream has a finished write pending.
+        auto finished_send = false;
+        for (const auto &name : scheduled_core_op->get_inputs_names()) {
+            if (scheduled_core_op->finished_write_frames(name) == 0) {
+                finished_send = true;
+                break;
+            }
+        }
+        if (finished_send) {
+            break;
+        }
+        for (const auto &name : scheduled_core_op->get_inputs_names()) {
+            auto status = send_pending_buffer(core_op_handle, name, device_id);
+            if (HAILO_STREAM_ABORTED_BY_USER == status) {
+                LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+                return status;
+            }
+            CHECK_SUCCESS(status);
+        }
+        // Record which device handles this frame, so reads can be routed back to it.
+        scheduled_core_op->push_device_index(device_id);
+        scheduled_core_op->set_last_device_index(device_id);
+
+        if (m_should_monitor) {
+            update_utilization_send_started(device_id);
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Pushes one pending H2D buffer of `stream_name` to device `device_id` and updates
+// the per-device and per-core-op frame accounting accordingly.
+// Returns HAILO_STREAM_ABORTED_BY_USER if the transfer was aborted or the core-op
+// was stopped meanwhile.
+hailo_status CoreOpsScheduler::send_pending_buffer(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+    uint32_t device_id)
+{
+    assert(m_scheduled_core_ops.size() > core_op_handle);
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+    auto current_cng = scheduled_core_op->get_core_op();
+    auto input_stream = current_cng->get_input_stream_by_name(stream_name);
+    CHECK_EXPECTED_AS_STATUS(input_stream);
+
+    VDeviceInputStreamMultiplexerWrapper &vdevice_input = static_cast<VDeviceInputStreamMultiplexerWrapper&>(input_stream->get());
+    TRACE(InputVdmaDequeueTrace, "", core_op_handle, stream_name);
+    auto status = vdevice_input.send_pending_buffer(device_id);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    // Frame moved from "finished write" to "transferred to HW".
+    m_devices[device_id]->current_cycle_requested_transferred_frames_h2d[core_op_handle][stream_name]++;
+    scheduled_core_op->finished_write_frames().decrease(stream_name);
+
+    scheduled_core_op->h2d_finished_transferred_frames().increase(stream_name);
+
+    if (should_core_op_stop(core_op_handle)) {
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Decides whether `core_op_handle` is eligible to be scheduled next.
+// A core-op is ready when it has pending writes on all inputs and at least one
+// pending user read. When `check_threshold` is set, each input must additionally
+// be over its frame threshold or over its timeout; otherwise the core-op is
+// reported not ready. Stopped (aborted) core-ops are never ready.
+CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold)
+{
+    ReadyInfo result;
+    result.is_ready = false;
+
+    if (should_core_op_stop(core_op_handle)) {
+        // Do not switch to an aborted core-op
+        return result;
+    }
+
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    // Check if there aren't any write requests
+    bool has_pending_writes = scheduled_core_op->finished_write_frames_min_value() > 0;
+
+    // Check if there aren't any read requests
+    bool has_pending_user_reads = false;
+    for (const auto &name : scheduled_core_op->get_outputs_names()) {
+        if (scheduled_core_op->requested_read_frames(name) > 0) {
+            has_pending_user_reads = true;
+            break;
+        }
+    }
+
+    // Per-input flags used to report whether the whole core-op passed by threshold / by timeout.
+    std::vector<bool> over_threshold;
+    over_threshold.reserve(scheduled_core_op->get_inputs_names().size());
+    std::vector<bool> over_timeout;
+    over_timeout.reserve(scheduled_core_op->get_inputs_names().size());
+
+    if (check_threshold) {
+        for (const auto &name : scheduled_core_op->get_inputs_names()) {
+            auto threshold_exp = scheduled_core_op->get_threshold(name);
+            if (!threshold_exp) {
+                LOGGER__ERROR("Failed to get threshold for stream {}", name);
+                return result;
+            }
+            // DEFAULT_SCHEDULER_MIN_THRESHOLD is treated as a threshold of a single frame.
+            auto threshold = (DEFAULT_SCHEDULER_MIN_THRESHOLD == threshold_exp.value()) ? 1 : threshold_exp.value();
+            auto timeout_exp = scheduled_core_op->get_timeout();
+            if (!timeout_exp) {
+                LOGGER__ERROR("Failed to get timeout for stream {}", name);
+                return result;
+            }
+            auto timeout = timeout_exp.release();
+
+            // Check if there aren't enough write requests to reach threshold and timeout didn't pass
+            auto write_requests = scheduled_core_op->requested_write_frames(name) + scheduled_core_op->finished_write_frames(name);
+            auto stream_over_threshold = write_requests >= threshold;
+            auto stream_over_timeout = timeout <= (std::chrono::steady_clock::now() - scheduled_core_op->get_last_run_timestamp());
+            over_threshold.push_back(stream_over_threshold);
+            over_timeout.push_back(stream_over_timeout);
+            if (stream_over_threshold || stream_over_timeout) {
+                continue;
+            } else {
+                // Some input is below both its threshold and its timeout - not ready yet.
+                result.is_ready = false;
+                return result;
+            }
+        }
+    }
+
+    result.threshold = std::all_of(over_threshold.begin(), over_threshold.end(), [](auto over) { return over; });
+    result.timeout = std::all_of(over_timeout.begin(), over_timeout.end(), [](auto over) { return over; });
+    result.is_ready = has_pending_writes && has_pending_user_reads;
+
+    return result;
+}
+
+// Blocks until the D2H stream `stream_name` has a frame to read (or timeout/abort).
+// While waiting, it may switch this core-op onto an available device so the frame
+// can actually be produced. Returns the index of the device that produced the frame.
+Expected<uint32_t> CoreOpsScheduler::wait_for_read(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+    const std::chrono::milliseconds &timeout)
+{
+    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    scheduled_core_op->requested_read_frames().increase(stream_name);
+
+    hailo_status status = HAILO_SUCCESS;
+    auto wait_res = m_core_ops_cvs[core_op_handle]->wait_for(stream_name, lock, timeout, [this, core_op_handle, scheduled_core_op, stream_name, &status] {
+
+        if (should_core_op_stop(core_op_handle)) {
+            status = HAILO_STREAM_ABORTED_BY_USER;
+            return true; // return true so that the wait will finish
+        }
+
+        // Opportunistic activation: if a device is free, run this core-op on it now.
+        auto device_id = CoreOpsSchedulerOracle::get_avail_device(*this, core_op_handle);
+        if (INVALID_DEVICE_ID != device_id) {
+            status = switch_core_op(core_op_handle, device_id);
+            if (HAILO_SUCCESS != status) {
+                return true; // return true so that the wait will finish
+            }
+        }
+
+        return scheduled_core_op->can_stream_read(stream_name);
+    });
+    CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "{} (D2H) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return make_unexpected(status);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    scheduled_core_op->requested_read_frames().decrease(stream_name);
+
+    // Tell the caller which device this frame came from (FIFO order of sends).
+    return scheduled_core_op->pop_device_index(stream_name);
+}
+
+// Called after a user finished reading a D2H frame. Updates read accounting and,
+// for NMS networks (whose completion is only known here), may pick and notify the
+// next core-op once this one has drained everything on the device.
+hailo_status CoreOpsScheduler::signal_read_finish(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, uint32_t device_id)
+{
+    auto should_notify_next = false;
+    {
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+        scheduled_core_op->finished_read_frames().increase(stream_name);
+        m_devices[device_id]->current_cycle_finished_read_frames_d2h[core_op_handle][stream_name]++;
+        scheduled_core_op->d2h_finished_transferred_frames().decrease(stream_name);
+        m_fps_accumulator[core_op_handle]++;
+
+        decrease_core_op_counters(core_op_handle);
+
+        auto has_drained_everything = has_core_op_drained_everything(core_op_handle, device_id);
+        if (scheduled_core_op->is_nms() && has_drained_everything) {
+            // In NMS networks there is a possibility that next wasn't chosen yet
+            choose_next_core_op(device_id, true);
+
+            // If we didn't choose with threshold or timeout, let's choose without threshold
+            if (!m_devices[device_id]->is_switching_core_op) {
+                choose_next_core_op(device_id, false);
+            }
+
+            if (m_devices[device_id]->is_switching_core_op) {
+                should_notify_next = true;
+            }
+
+            // NOTE(review): the monitor update runs only on this NMS/drained path, unlike
+            // the interrupt callback where it runs unconditionally - confirm intentional.
+            if (m_should_monitor) {
+                update_utilization_read_buffers_finished(device_id, core_op_handle, has_drained_everything);
+            }
+        }
+    }
+
+    // Notify stream that frame was read and we have a space in the read buffers (wait_for_write)
+    m_core_ops_cvs[core_op_handle]->notify_all();
+
+    if (should_notify_next) {
+        // Notify all the threads of the next ng, for nms networks this is the only place we know the network was finished (wait_for_read)
+        m_core_ops_cvs[m_devices[device_id]->next_core_op_handle]->notify_all();
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Thin forwarder to the scheduled core-op's own counter bookkeeping.
+void CoreOpsScheduler::decrease_core_op_counters(const scheduler_core_op_handle_t &core_op_handle)
+{
+    return m_scheduled_core_ops[core_op_handle]->decrease_current_core_op_counters();
+}
+
+// Returns true when `core_op_handle` has no in-flight frames left on device
+// `device_id` (and therefore the device may switch to another core-op).
+bool CoreOpsScheduler::has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id)
+{
+    if (INVALID_CORE_OP_HANDLE == core_op_handle) {
+        // If no core-op is running, consider it as drained
+        return true;
+    }
+
+    if (core_op_all_streams_aborted(core_op_handle)) {
+        // We treat core-op as drained only if all streams are aborted - to make sure there aren't any ongoing transfers
+        return true;
+    }
+
+    // Non-NMS multi-device / dynamic-batch flows compare the per-device cycle counters directly.
+    if ((!m_scheduled_core_ops[core_op_handle]->is_nms()) && (is_multi_device() || m_scheduled_core_ops[core_op_handle]->use_dynamic_batch_flow())) {
+        auto current_device_info = m_devices[device_id];
+        auto max_transferred_h2d = get_max_value_of_unordered_map(current_device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle]);
+        auto min_transferred_d2h = get_min_value_of_unordered_map(current_device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle]);
+
+        // Drained when every frame pushed in (H2D) has come back out (D2H).
+        return (max_transferred_h2d == min_transferred_d2h);
+    }
+
+    return m_scheduled_core_ops[core_op_handle]->has_core_op_drained_everything();
+}
+
+// Re-enables a previously disabled stream and wakes any waiters on its core-op.
+// Idempotent: returns immediately if the stream is already enabled.
+hailo_status CoreOpsScheduler::enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
+{
+    {
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+        if (!m_should_core_op_stop[core_op_handle][stream_name]) {
+            return HAILO_SUCCESS;
+        }
+
+        m_should_core_op_stop[core_op_handle][stream_name] = false;
+    }
+    // Notify outside the lock so woken waiters can re-acquire it immediately.
+    m_core_ops_cvs[core_op_handle]->notify_all();
+
+    return HAILO_SUCCESS;
+}
+
+// Marks a stream as stopped and wakes any waiters so they can observe the abort.
+// Idempotent: returns immediately if the stream is already disabled.
+hailo_status CoreOpsScheduler::disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
+{
+    {
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+
+        if (m_should_core_op_stop[core_op_handle][stream_name]) {
+            return HAILO_SUCCESS;
+        }
+
+        m_should_core_op_stop[core_op_handle][stream_name] = true;
+    }
+    // Notify outside the lock so blocked wait_for_write/wait_for_read calls abort promptly.
+    m_core_ops_cvs[core_op_handle]->notify_all();
+
+    return HAILO_SUCCESS;
+}
+
+// Sets the scheduling timeout for the whole core-op (network_name is currently ignored).
+hailo_status CoreOpsScheduler::set_timeout(const scheduler_core_op_handle_t &core_op_handle, const std::chrono::milliseconds &timeout, const std::string &/*network_name*/)
+{
+    // TODO: call in loop for set_timeout with the relevant stream-names (of the given network)
+    return m_scheduled_core_ops[core_op_handle]->set_timeout(timeout);
+}
+
+// Sets the frame threshold for the whole core-op (network_name is currently ignored).
+hailo_status CoreOpsScheduler::set_threshold(const scheduler_core_op_handle_t &core_op_handle, uint32_t threshold, const std::string &/*network_name*/)
+{
+    // TODO: call in loop for set_threshold with the relevant stream-names (of the given network)
+    return m_scheduled_core_ops[core_op_handle]->set_threshold(threshold);
+}
+
+// Changes the scheduling priority of a core-op: removes the handle from its old
+// priority bucket and appends it to the new one.
+// Fails with HAILO_INVALID_ARGUMENT for an out-of-range priority.
+hailo_status CoreOpsScheduler::set_priority(const scheduler_core_op_handle_t &core_op_handle, core_op_priority_t priority, const std::string &/*network_name*/)
+{
+    CHECK(priority <= HAILO_SCHEDULER_PRIORITY_MAX, HAILO_INVALID_ARGUMENT);
+    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+    auto old_priority = m_scheduled_core_ops[core_op_handle]->get_priority();
+    auto &priority_vector = m_core_op_priority[old_priority];
+    auto it = std::find(priority_vector.begin(), priority_vector.end(), core_op_handle);
+    // The handle must exist in its current bucket - anything else is internal corruption.
+    CHECK(it != priority_vector.end(), HAILO_INTERNAL_FAILURE);
+
+    priority_vector.erase(it);
+    m_scheduled_core_ops[core_op_handle]->set_priority(priority);
+    m_core_op_priority[priority].push_back(core_op_handle);
+
+    return HAILO_SUCCESS;
+}
+
+// Asks the oracle to pick the next core-op for `device_id`, unless a switch is
+// already in progress. Returns true if a new core-op was chosen.
+bool CoreOpsScheduler::choose_next_core_op(size_t device_id, bool check_threshold)
+{
+    if (!m_devices[device_id]->is_switching_core_op) {
+        return CoreOpsSchedulerOracle::choose_next_model(*this, m_devices[device_id]->device_id, check_threshold);
+    }
+    return false;
+}
+
+// Returns true if ANY stream of the core-op is marked as stopped.
+bool CoreOpsScheduler::should_core_op_stop(const scheduler_core_op_handle_t &core_op_handle)
+{
+    for (const auto &name_flag_pair : m_should_core_op_stop[core_op_handle]) {
+        if (name_flag_pair.second) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Returns true only if ALL streams of the core-op are marked as stopped.
+// (Compare with should_core_op_stop, which returns true if any stream is stopped.)
+bool CoreOpsScheduler::core_op_all_streams_aborted(const scheduler_core_op_handle_t &core_op_handle)
+{
+    for (const auto &name_flag_pair : m_should_core_op_stop[core_op_handle]) {
+        if (!name_flag_pair.second) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// Wakes every thread blocked on any core-op's condition variables.
+void CoreOpsScheduler::notify_all()
+{
+    {
+        // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
+        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
+    }
+    // TODO: consider notifying only the relevant ng or stream
+    for (auto &cng_cvs : m_core_ops_cvs) {
+        cng_cvs.second->notify_all();
+    }
+}
+
+// Keeps frames streaming to devices already running this core-op (when the batch
+// flow allows it), instead of waiting for the next full scheduling decision.
+// Skipped entirely when dynamic batch flow is used, or when the core-op is ready
+// to switch and the oracle says streaming should stop.
+hailo_status CoreOpsScheduler::optimize_streaming_if_enabled(const scheduler_core_op_handle_t &core_op_handle)
+{
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+    if ((!scheduled_core_op->use_dynamic_batch_flow()) && !(scheduled_core_op->is_ready_to_switch() &&
+        CoreOpsSchedulerOracle::should_stop_streaming(*this, scheduled_core_op->get_priority()))) {
+        // Round-robin over devices starting just after the one used last.
+        for (uint32_t i = 0; i < m_devices.size(); i++) {
+            uint32_t index = scheduled_core_op->get_last_device_index() + i + 1;
+            index %= static_cast<uint32_t>(m_devices.size());
+            auto device_info = m_devices[index];
+            // If multi device, check for space in the vdma buffers; send_pending_buffer is waitable in the current implementation.
+            // Can be removed after dynamic descriptor binding support.
+            if (device_info->current_core_op_handle == core_op_handle &&
+                (!is_multi_device() || (get_min_avail_buffers_count(core_op_handle, device_info->device_id) >= DEFAULT_BURST_SIZE))) {
+                auto status = send_all_pending_buffers(core_op_handle, device_info->device_id, DEFAULT_BURST_SIZE);
+                if (HAILO_STREAM_ABORTED_BY_USER == status) {
+                    LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+                    return status;
+                }
+                CHECK_SUCCESS(status);
+            }
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Estimates how many more input buffers can be queued to `device_id` for this
+// core-op: the per-stream minimum buffer capacity minus the frames currently
+// in flight (sent H2D but not yet seen back on D2H; for NMS, not yet read).
+// NOTE(review): the subtraction assumes capacity >= ongoing frames - a violated
+// assumption would wrap the unsigned result; confirm this invariant holds.
+uint16_t CoreOpsScheduler::get_min_avail_buffers_count(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id)
+{
+    auto device_info = m_devices[device_id];
+    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+
+    auto max_transferred_h2d = get_max_value_of_unordered_map(device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle]);
+    // For NMS we count frames the user has actually read; otherwise frames transferred D2H.
+    auto min_d2h_frames = scheduled_core_op->is_nms() ? get_min_value_of_unordered_map(device_info->current_cycle_finished_read_frames_d2h[core_op_handle]) :
+        get_min_value_of_unordered_map(device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle]);
+    auto ongoing_frames = static_cast<uint16_t>(max_transferred_h2d - min_d2h_frames);
+
+    uint16_t avail_buffers = static_cast<uint16_t>(scheduled_core_op->get_min_input_buffers_count(get_device_count()) - ongoing_frames);
+
+    return avail_buffers;
+}
+
+// Accumulates the elapsed time since the device's last utilization timestamp into
+// both the device's and the core-op's utilization counters (monitor bookkeeping).
+void CoreOpsScheduler::update_utilization_timers(scheduler_device_idx_t device_id, scheduler_core_op_handle_t core_op_handle)
+{
+    assert(contains(m_core_op_utilization, core_op_handle));
+
+    auto time_diff = std::chrono::duration_cast<std::chrono::duration<double>>(
+        std::chrono::steady_clock::now() - m_last_measured_utilization_timestamp[device_id]).count();
+
+    m_device_utilization[device_id] += time_diff;
+    m_core_op_utilization[core_op_handle] += time_diff;
+}
+
+// Restarts the utilization measurement window for the given device.
+void CoreOpsScheduler::update_utilization_timestamp(scheduler_device_idx_t device_id)
+{
+    m_last_measured_utilization_timestamp[device_id] = std::chrono::steady_clock::now();
+}
+
+// Monitor hook called when a send starts: if the device was idle (drained),
+// mark it busy and start a new utilization measurement window.
+void CoreOpsScheduler::update_utilization_send_started(scheduler_device_idx_t device_id)
+{
+    if (m_device_has_drained_everything[device_id]) {
+        update_device_drained_state(device_id, false);
+        update_utilization_timestamp(device_id);
+    }
+}
+
+// Records whether the device currently has no in-flight frames (monitor bookkeeping).
+void CoreOpsScheduler::update_device_drained_state(scheduler_device_idx_t device_id, bool state)
+{
+    m_device_has_drained_everything[device_id] = state;
+}
+
+// Monitor hook called when read buffers finished: folds the elapsed busy time
+// into the utilization counters, updates the drained state, and - if the device
+// is still busy - starts a new measurement window.
+void CoreOpsScheduler::update_utilization_read_buffers_finished(scheduler_device_idx_t device_id,
+    scheduler_core_op_handle_t core_op_handle, bool is_drained_everything)
+{
+    update_utilization_timers(device_id, core_op_handle);
+    update_device_drained_state(device_id, is_drained_everything);
+    if (!is_drained_everything) {
+        update_utilization_timestamp(device_id);
+    }
+}
+
+} /* namespace hailort */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file network_group_scheduler.hpp
+ * @brief Class declaration for CoreOpsScheduler that schedules core-ops to be active depending on the scheduling algorithm.
+ **/
+
+#ifndef _HAILO_NETWORK_GROUP_SCHEDULER_HPP_
+#define _HAILO_NETWORK_GROUP_SCHEDULER_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/utils.hpp"
+#include "common/filesystem.hpp"
+
+#include "vdevice/scheduler/scheduler_mon.hpp"
+#include "vdevice/scheduler/scheduled_core_op_state.hpp"
+#include "vdevice/scheduler/scheduled_core_op_cv.hpp"
+#include "vdevice/scheduler/scheduler_base.hpp"
+
+
+namespace hailort
+{
+
+#define INVALID_CORE_OP_HANDLE (UINT32_MAX)
+#define INVALID_DEVICE_ID (UINT32_MAX)
+
+using scheduler_core_op_handle_t = uint32_t;
+using core_op_priority_t = uint8_t;
+using scheduler_device_idx_t = uint32_t;
+
+class CoreOpsScheduler;
+using CoreOpsSchedulerPtr = std::shared_ptr<CoreOpsScheduler>;
+
+// We use mostly weak pointer for the scheduler to prevent circular dependency of the pointers
+using CoreOpsSchedulerWeakPtr = std::weak_ptr<CoreOpsScheduler>;
+
+using stream_name_t = std::string;
+
+// Schedules core-ops onto the managed devices according to the configured
+// scheduling algorithm. Public entry points are called from the stream
+// read/write paths; internal state is guarded by m_before_read_write_mutex.
+class CoreOpsScheduler : public SchedulerBase
+{
+public:
+    // Factory for a round-robin scheduler over `device_count` devices.
+    static Expected<CoreOpsSchedulerPtr> create_round_robin(uint32_t device_count, std::vector<std::string> &devices_bdf_id,
+        std::vector<std::string> &devices_arch);
+    CoreOpsScheduler(hailo_scheduling_algorithm_t algorithm, uint32_t device_count, std::vector<std::string> &devices_bdf_id,
+        std::vector<std::string> &devices_arch);
+
+    virtual ~CoreOpsScheduler();
+    // Non-copyable and non-movable: threads and CVs reference this object.
+    CoreOpsScheduler(const CoreOpsScheduler &other) = delete;
+    CoreOpsScheduler &operator=(const CoreOpsScheduler &other) = delete;
+    CoreOpsScheduler &operator=(CoreOpsScheduler &&other) = delete;
+    CoreOpsScheduler(CoreOpsScheduler &&other) noexcept = delete;
+
+    // Registers a core-op and returns its scheduler handle.
+    Expected<scheduler_core_op_handle_t> add_core_op(std::shared_ptr<CoreOp> added_core_op);
+
+    // Blocking read/write synchronization points used by the stream wrappers.
+    hailo_status wait_for_write(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+        const std::chrono::milliseconds &timeout, const std::function<bool()> &should_cancel);
+    hailo_status signal_write_finish(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, bool did_write_fail);
+    Expected<uint32_t> wait_for_read(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+        const std::chrono::milliseconds &timeout);
+    hailo_status signal_read_finish(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, uint32_t device_id);
+
+    // Per-stream enable/disable (abort) control.
+    hailo_status enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+    hailo_status disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+
+    // Scheduling-parameter setters (network_name is currently unused by the implementations).
+    hailo_status set_timeout(const scheduler_core_op_handle_t &core_op_handle, const std::chrono::milliseconds &timeout, const std::string &network_name);
+    hailo_status set_threshold(const scheduler_core_op_handle_t &core_op_handle, uint32_t threshold, const std::string &network_name);
+    hailo_status set_priority(const scheduler_core_op_handle_t &core_op_handle, core_op_priority_t priority, const std::string &network_name);
+
+    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold) override;
+    virtual bool has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id) override;
+
+    // Wakes every thread blocked on any core-op condition variable.
+    void notify_all();
+
+protected:
+    bool choose_next_core_op(size_t device_id, bool check_threshold);
+
+    // Per-core-op flag: a batch-size change is in progress.
+    std::unordered_map<scheduler_core_op_handle_t, std::atomic_bool> m_changing_current_batch_size;
+    // Per-core-op, per-stream stop (abort) flags.
+    std::unordered_map<scheduler_core_op_handle_t, std::map<stream_name_t, std::atomic_bool>> m_should_core_op_stop;
+
+private:
+    hailo_status switch_core_op(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id,
+        bool keep_nn_config = false);
+    void reset_current_core_op_timestamps(uint32_t device_id);
+
+    hailo_status send_all_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id, uint32_t burst_size);
+    hailo_status send_pending_buffer(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, uint32_t device_id);
+
+    void decrease_core_op_counters(const scheduler_core_op_handle_t &core_op_handle);
+    bool should_core_op_stop(const scheduler_core_op_handle_t &core_op_handle);
+    bool core_op_all_streams_aborted(const scheduler_core_op_handle_t &core_op_handle);
+
+    std::string get_core_op_name(const scheduler_core_op_handle_t &core_op_handle);
+    bool is_core_op_active(const scheduler_core_op_handle_t &core_op_handle);
+    bool is_multi_device();
+    hailo_status optimize_streaming_if_enabled(const scheduler_core_op_handle_t &network_group_handle);
+    uint16_t get_min_avail_buffers_count(const scheduler_core_op_handle_t &network_group_handle, uint32_t device_id);
+
+    // Scheduler monitor (MON) machinery.
+    hailo_status start_mon();
+    void time_dependent_events_cycle_calc();
+    void log_monitor_device_infos(ProtoMon &mon);
+    void log_monitor_networks_infos(ProtoMon &mon);
+    void log_monitor_frames_infos(ProtoMon &mon);
+    void update_utilization_timers(scheduler_device_idx_t device_id, scheduler_core_op_handle_t core_op_handle);
+    void update_utilization_timestamp(scheduler_device_idx_t device_id);
+    void update_utilization_send_started(scheduler_device_idx_t device_id);
+    void update_device_drained_state(scheduler_device_idx_t device_id, bool state);
+    void update_utilization_read_buffers_finished(scheduler_device_idx_t device_id, scheduler_core_op_handle_t core_op_hanle, bool is_drained_everything);
+    hailo_status set_h2d_frames_counters(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+        ProtoMonStreamFramesInfo &stream_frames_info);
+    hailo_status set_d2h_frames_counters(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+        ProtoMonStreamFramesInfo &stream_frames_info);
+#if defined(__GNUC__)
+    Expected<std::shared_ptr<TempFile>> open_temp_mon_file();
+    void dump_state();
+#endif
+
+    // Core-op state, indexed by scheduler handle.
+    std::vector<std::shared_ptr<ScheduledCoreOp>> m_scheduled_core_ops;
+    // Guards all scheduler state touched from the read/write paths.
+    std::mutex m_before_read_write_mutex;
+    // Per-core-op condition variables used by wait_for_read/wait_for_write.
+    std::unordered_map<scheduler_core_op_handle_t, std::shared_ptr<ScheduledCoreOpCV>> m_core_ops_cvs;
+
+    // Params for the scheduler MON
+    std::atomic_bool m_should_monitor;
+    std::thread m_mon_thread;
+    EventPtr m_mon_shutdown_event;
+#if defined(__GNUC__)
+    std::shared_ptr<TempFile> m_mon_tmp_output;
+#endif
+    std::chrono::time_point<std::chrono::steady_clock> m_last_measured_timestamp;
+    double m_last_measured_time_duration;
+    std::unordered_map<scheduler_device_idx_t, double> m_device_utilization;
+    std::unordered_map<scheduler_device_idx_t, std::atomic_bool> m_device_has_drained_everything;
+    std::unordered_map<scheduler_device_idx_t, std::chrono::time_point<std::chrono::steady_clock>> m_last_measured_utilization_timestamp;
+    // TODO: Consider adding Accumulator classes for more info (min, max, mean, etc..)
+    std::unordered_map<scheduler_core_op_handle_t, double> m_core_op_utilization;
+    std::unordered_map<scheduler_core_op_handle_t, std::atomic_uint32_t> m_fps_accumulator;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_NETWORK_GROUP_SCHEDULER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduled_core_op_cv.hpp
+ * @brief Class declaration for scheduled core-ops conditional variables
+ **/
+
+#ifndef _HAILO_SCHEDULED_CORE_OP_CV_HPP_
+#define _HAILO_SCHEDULED_CORE_OP_CV_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/utils.hpp"
+
+#include "vdevice/scheduler/scheduler_mon.hpp"
+
+#include <condition_variable>
+
+
+namespace hailort
+{
+
+// Holds one condition variable per stream of a scheduled core-op, so the scheduler
+// can block/wake readers and writers on a per-stream basis.
+class ScheduledCoreOpCV
+{
+public:
+    // Builds a CV map with one entry per stream of the given core-op.
+    // Returns HAILO_OUT_OF_HOST_MEMORY if any allocation fails.
+    static Expected<std::shared_ptr<ScheduledCoreOpCV>> create(std::shared_ptr<CoreOp> added_cng)
+    {
+        auto stream_infos = added_cng->get_all_stream_infos();
+        CHECK_EXPECTED(stream_infos);
+
+        std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> cv_per_stream;
+        for (const auto &stream_info : stream_infos.value()) {
+            auto cv = make_shared_nothrow<std::condition_variable>();
+            CHECK_NOT_NULL_AS_EXPECTED(cv, HAILO_OUT_OF_HOST_MEMORY);
+            cv_per_stream[stream_info.name] = std::move(cv);
+        }
+
+        // Move the map in - no need to copy it into the constructor.
+        auto scheduled_core_op_cv = make_shared_nothrow<ScheduledCoreOpCV>(std::move(cv_per_stream));
+        CHECK_NOT_NULL_AS_EXPECTED(scheduled_core_op_cv, HAILO_OUT_OF_HOST_MEMORY);
+
+        return scheduled_core_op_cv;
+    }
+
+    virtual ~ScheduledCoreOpCV() = default;
+    ScheduledCoreOpCV(const ScheduledCoreOpCV &other) = delete;
+    ScheduledCoreOpCV &operator=(const ScheduledCoreOpCV &other) = delete;
+    ScheduledCoreOpCV &operator=(ScheduledCoreOpCV &&other) = delete;
+    ScheduledCoreOpCV(ScheduledCoreOpCV &&other) noexcept = delete;
+
+    // Wakes a single waiter on the given stream's CV. The stream must exist in the map.
+    void notify_one(const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        m_map[name]->notify_one();
+    }
+
+    // Wakes every waiter on every stream.
+    // Fixed: previously called notify_one() per CV, which leaves all but one waiter
+    // on the same stream blocked - contradicting this function's contract.
+    void notify_all()
+    {
+        for (auto &cv : m_map) {
+            cv.second->notify_all();
+        }
+    }
+
+    // Waits (with timeout and predicate) on the given stream's CV.
+    // __lock must already be held by the caller, per std::condition_variable::wait_for.
+    template<typename _Rep, typename _Period, typename _Predicate>
+    bool wait_for(const stream_name_t &name, std::unique_lock<std::mutex>& __lock, const std::chrono::duration<_Rep, _Period>& __rtime, _Predicate __p)
+    {
+        assert(contains(m_map, name));
+        return m_map[name]->wait_for(__lock, __rtime, __p);
+    }
+
+    ScheduledCoreOpCV(std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> cv_map) : m_map(std::move(cv_map))
+    {}
+
+private:
+    std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> m_map;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULED_CORE_OP_CV_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduled_core_op_state.cpp
+ * @brief: Scheduled CoreOp
+ **/
+
+#include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
+#include "vdevice/scheduler/scheduler_oracle.hpp"
+#include "vdevice/scheduler/scheduled_core_op_state.hpp"
+#include "hef/hef_internal.hpp"
+
+
+namespace hailort
+{
+
+#define SINGLE_CONTEXT_BATCH_SIZE (1)
+
+// Builds the per-core-op scheduler state: zero-initialized frame counters for every
+// stream, split by direction, plus the input/output name lists used for iteration.
+// m_is_nms is set if any output stream uses the HAILO_NMS format order.
+ScheduledCoreOp::ScheduledCoreOp(std::shared_ptr<CoreOp> core_op, std::chrono::milliseconds timeout,
+    uint16_t max_batch_size, StreamInfoVector &stream_infos, std::string core_op_name) :
+    m_core_op(core_op),
+    m_last_run_time_stamp(std::chrono::steady_clock::now()),
+    m_timeout(std::move(timeout)),
+    m_frame_was_sent(false),
+    m_max_batch_size(max_batch_size),
+    m_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
+    m_last_device_index(INVALID_DEVICE_ID),
+    m_core_op_name(core_op_name),
+    m_inputs_names(),
+    m_outputs_names(),
+    m_is_nms(false),
+    m_ready_to_switch(false)
+{
+    // Prepare empty counters for the added core-op
+    for (const auto &stream_info : stream_infos) {
+        m_min_threshold_per_stream[stream_info.name] = DEFAULT_SCHEDULER_MIN_THRESHOLD;
+        if (HAILO_H2D_STREAM == stream_info.direction) {
+            // Input (host-to-device) stream: track write requests, completed writes and transfers.
+            m_requested_write_frames.insert(stream_info.name);
+            m_finished_write_frames.insert(stream_info.name);
+            m_h2d_finished_transferred_frames.insert(stream_info.name);
+            m_inputs_names.push_back(stream_info.name);
+        } else {
+            // Output (device-to-host) stream: also keep a per-stream queue of device
+            // indices so reads are matched to the device that produced each frame.
+            m_requested_read_frames.insert(stream_info.name);
+            m_finished_read_frames.insert(stream_info.name);
+            m_d2h_finished_transferred_frames.insert(stream_info.name);
+            m_outputs_names.push_back(stream_info.name);
+            m_output_streams_read_orders[stream_info.name] = std::queue<uint32_t>();
+            if (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) {
+                m_is_nms = true;
+            }
+        }
+    }
+}
+
+// Factory for ScheduledCoreOp. The dynamic max batch size is only taken from the
+// first stream when the core-op is multi-context and the batch is larger than a
+// single-context batch; otherwise the "ignore" sentinel is kept.
+// NOTE(review): assumes stream_infos is non-empty - confirm against callers.
+Expected<std::shared_ptr<ScheduledCoreOp>> ScheduledCoreOp::create(std::shared_ptr<CoreOp> added_core_op, StreamInfoVector &stream_infos)
+{
+    auto timeout = DEFAULT_SCHEDULER_TIMEOUT;
+
+    uint16_t max_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE;
+    if (added_core_op->get_supported_features().multi_context) {
+        auto batch_size = added_core_op->get_stream_batch_size(stream_infos[0].name);
+        CHECK_EXPECTED(batch_size);
+        if (batch_size.value() > SINGLE_CONTEXT_BATCH_SIZE) {
+            max_batch_size = batch_size.release();
+        }
+    }
+
+    // Fixed: the nothrow allocation result was returned unchecked; on allocation
+    // failure callers would get a null shared_ptr wrapped in a successful Expected.
+    auto scheduled_core_op = make_shared_nothrow<ScheduledCoreOp>(added_core_op, timeout, max_batch_size, stream_infos, added_core_op->name());
+    CHECK_NOT_NULL_AS_EXPECTED(scheduled_core_op, HAILO_OUT_OF_HOST_MEMORY);
+
+    return scheduled_core_op;
+}
+
+// Returns true if every output stream still has room for more than `ongoing_frames`
+// frames in its read buffer, i.e. activating/continuing this core-op will not
+// overflow any output buffer.
+bool ScheduledCoreOp::has_enough_space_in_read_buffers(uint32_t ongoing_frames)
+{
+    auto output_streams = m_core_op->get_output_streams();
+    for (auto &output_stream : output_streams) {
+        OutputStreamBase &vdevice_output = static_cast<OutputStreamBase&>(output_stream.get());
+        if (auto pending_frames_size = vdevice_output.get_buffer_frames_size()) {
+            if (pending_frames_size.value() <= ongoing_frames) {
+                return false;
+            }
+            // If couldnt get pending frames size and count (e.g. NMS layer), assume we have space - scheduler switch will prevent deadlocks here
+        }
+    }
+    return true;
+}
+
+// Returns the smallest per-device input buffer capacity (in frames) across all
+// input streams. Streams that cannot report a buffer size are skipped, so the
+// result is UINT16_MAX when no stream reports one.
+uint16_t ScheduledCoreOp::get_min_input_buffers_count(uint32_t device_count)
+{
+    auto input_streams = m_core_op->get_input_streams();
+    uint16_t buffers_count = UINT16_MAX;
+    for (auto &input_stream : input_streams) {
+        InputStreamBase &vdevice_input = static_cast<InputStreamBase&>(input_stream.get());
+        if (auto pending_frames_size = vdevice_input.get_buffer_frames_size()) {
+            // Capacity is split evenly across devices, hence the division.
+            buffers_count = std::min(buffers_count, static_cast<uint16_t>(pending_frames_size.value() / device_count));
+        }
+    }
+    return buffers_count;
+}
+
+// True if this input stream has the highest total written-frame count among all inputs.
+bool ScheduledCoreOp::has_input_written_most_frames(const std::string &stream_name)
+{
+    auto total_writes = total_written_frames_count();
+    return total_writes[stream_name] == get_max_value_of_unordered_map(total_writes);
+}
+
+// TODO: Use get_pre_transfer_h2d_frames_count + get_h2d_transferred_frames_count
+// TODO: Avoid returning map (malloc)
+// Per-input-stream sum of requested + finished + transferred write frames, i.e.
+// every frame the host has pushed toward the device in any state.
+std::unordered_map<stream_name_t, uint32_t> ScheduledCoreOp::total_written_frames_count()
+{
+    std::unordered_map<stream_name_t, uint32_t> write_sum;
+    for (const auto &name : get_inputs_names()) {
+        write_sum[name] = m_requested_write_frames[name] + m_finished_write_frames[name]
+            + m_h2d_finished_transferred_frames[name];
+    }
+    return write_sum;
+}
+
+// TODO: Use max(m_d2h_finished_transferred_frames) == 0 instead
+// True while any output stream has read fewer frames than were sent to the device.
+bool ScheduledCoreOp::has_pending_frames()
+{
+    auto h2d_transferred_frames_count = m_h2d_finished_transferred_frames.get_max_value();
+    for (const auto &name : get_outputs_names()) {
+        if (m_finished_read_frames[name] < h2d_transferred_frames_count) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// A stream may be read only if a device index was queued for it by push_device_index().
+bool ScheduledCoreOp::can_stream_read(const std::string &stream_name)
+{
+    return !m_output_streams_read_orders[stream_name].empty();
+}
+
+// A stream may be written only if the frames still in flight (written but not yet
+// fully read back) leave room in every output read buffer.
+bool ScheduledCoreOp::can_stream_write(const std::string &stream_name)
+{
+    auto total_written_frames = total_written_frames_count()[stream_name];
+    auto min_finished_read = finished_read_frames_min_value();
+    auto ongoing_frames = (min_finished_read < total_written_frames) ? (total_written_frames - min_finished_read) : 0;
+    return has_enough_space_in_read_buffers(ongoing_frames);
+}
+
+
+// Dynamic batching is in effect whenever a real max batch size was configured
+// (i.e. not the "ignore" sentinel).
+bool ScheduledCoreOp::use_dynamic_batch_flow()
+{
+    return (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE != m_max_batch_size);
+}
+
+// True once every frame sent to the device has either been transferred back or
+// fully read on all output streams - i.e. the core-op can be safely switched out.
+bool ScheduledCoreOp::has_core_op_drained_everything()
+{
+    uint32_t written_frames = m_h2d_finished_transferred_frames.get_max_value();
+    for (const auto &name : get_outputs_names()) {
+        if ((m_finished_read_frames[name] + m_d2h_finished_transferred_frames[name]) < written_frames) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// Decrements the h2d-transferred and finished-read counters together to keep the
+// counters bounded, but only when every counter is >= 2 so no counter can hit 0
+// (0 is a meaningful state for the scheduler).
+void ScheduledCoreOp::decrease_current_core_op_counters()
+{
+    // Decrease only if counter is 2 or bigger because reaching 0 can cause states to change
+    if (!m_h2d_finished_transferred_frames.all_values_bigger_or_equal(2)) {
+        return;
+    }
+    if (!m_finished_read_frames.all_values_bigger_or_equal(2)) {
+        return;
+    }
+
+    for (const auto &name : get_inputs_names()) {
+        m_h2d_finished_transferred_frames[name]--;
+    }
+    for (const auto &name : get_outputs_names()) {
+        m_finished_read_frames[name]--;
+    }
+}
+
+// Max over inputs of frames requested or written but not yet transferred to the device.
+uint32_t ScheduledCoreOp::get_pre_transfer_h2d_frames_count()
+{
+    std::unordered_map<stream_name_t, uint32_t> write_sum;
+    for (const auto &name : get_inputs_names()) {
+        write_sum[name] = m_requested_write_frames[name] + m_finished_write_frames[name];
+    }
+    return get_max_value_of_unordered_map(write_sum);
+}
+
+// Sets the scheduler timeout for this core-op. Only allowed before the first frame
+// was sent; stream_name is used for logging only - the timeout applies to the whole core-op.
+hailo_status ScheduledCoreOp::set_timeout(const std::chrono::milliseconds &timeout, const stream_name_t &stream_name)
+{
+    CHECK(!m_frame_was_sent, HAILO_INVALID_OPERATION,
+        "Setting scheduler timeout is allowed only before sending / receiving frames on the core-op.");
+    m_timeout = timeout;
+
+    auto name = (stream_name.empty()) ? get_core_op_name() : stream_name;
+    LOGGER__INFO("Setting scheduler timeout of {} to {}ms", name, timeout.count());
+
+    return HAILO_SUCCESS;
+}
+
+// Sets the scheduler threshold for this core-op. The threshold may not exceed the
+// configured max batch size (when dynamic batching is used) and, like set_timeout,
+// is only allowed before any frame was sent.
+hailo_status ScheduledCoreOp::set_threshold(uint32_t threshold, const stream_name_t &stream_name)
+{
+    CHECK((CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) ||
+        (threshold <= m_max_batch_size), HAILO_INVALID_ARGUMENT, "Threshold must be equal or lower than the maximum batch size!");
+
+    CHECK(!m_frame_was_sent, HAILO_INVALID_OPERATION,
+        "Setting scheduler threshold is allowed only before sending / receiving frames on the core-op.");
+
+    // TODO: Support setting threshold per stream. currently stream_name is always empty and de-facto we set threshold for the whole NG
+    for (auto &threshold_per_stream_pair : m_min_threshold_per_stream) {
+        threshold_per_stream_pair.second = threshold;
+    }
+
+    auto name = (stream_name.empty()) ? get_core_op_name() : stream_name;
+    LOGGER__INFO("Setting scheduler threshold of {} to {} frames", name, threshold);
+
+    return HAILO_SUCCESS;
+}
+
+// Trivial accessors for scheduler bookkeeping state.
+
+core_op_priority_t ScheduledCoreOp::get_priority()
+{
+    return m_priority;
+}
+
+void ScheduledCoreOp::set_priority(core_op_priority_t priority)
+{
+    m_priority = priority;
+}
+
+// Index of the device this core-op last ran on (INVALID_DEVICE_ID before the first run).
+uint32_t ScheduledCoreOp::get_last_device_index()
+{
+    return m_last_device_index;
+}
+
+void ScheduledCoreOp::set_last_device_index(uint32_t device_index)
+{
+    m_last_device_index = device_index;
+}
+
+std::string ScheduledCoreOp::get_core_op_name()
+{
+    return m_core_op_name;
+}
+
+std::shared_ptr<CoreOp> ScheduledCoreOp::get_core_op()
+{
+    return m_core_op;
+}
+
+// Latches the "a frame was sent" flag; once set, timeout/threshold become immutable
+// (see set_timeout / set_threshold).
+void ScheduledCoreOp::mark_frame_sent()
+{
+    m_frame_was_sent = true;
+}
+
+std::chrono::time_point<std::chrono::steady_clock> ScheduledCoreOp::get_last_run_timestamp()
+{
+    return m_last_run_time_stamp;
+}
+
+// Records the time at which the core-op was last scheduled to run.
+// Fixed: the parameter was mojibake ("×tamp") - a mis-encoded "&timestamp" -
+// which does not compile; restored the reference parameter.
+void ScheduledCoreOp::set_last_run_timestamp(const std::chrono::time_point<std::chrono::steady_clock> &timestamp)
+{
+    m_last_run_time_stamp = timestamp;
+}
+
+// Returns the core-op-wide scheduler timeout. Per-stream timeouts are not
+// supported, so a non-empty stream_name is rejected.
+Expected<std::chrono::milliseconds> ScheduledCoreOp::get_timeout(const stream_name_t &stream_name)
+{
+    CHECK_AS_EXPECTED(stream_name.empty(), HAILO_INVALID_OPERATION, "timeout per network is not supported");
+    auto timeout = m_timeout;
+    return timeout;
+}
+
+// Returns the configured threshold for the given stream, or HAILO_NOT_FOUND if
+// the stream is unknown.
+Expected<uint32_t> ScheduledCoreOp::get_threshold(const stream_name_t &stream_name)
+{
+    CHECK_AS_EXPECTED(contains(m_min_threshold_per_stream, stream_name), HAILO_NOT_FOUND);
+    return m_min_threshold_per_stream[stream_name].load();
+}
+
+// Effective batch size: the single-context constant unless dynamic batching is enabled.
+uint16_t ScheduledCoreOp::get_max_batch_size()
+{
+    if (!use_dynamic_batch_flow()) {
+        return SINGLE_CONTEXT_BATCH_SIZE;
+    }
+    return m_max_batch_size;
+}
+
+// Accessors exposing the raw frame counters (whole Counter or a single stream's
+// atomic). Callers mutate these directly; synchronization is presumably provided
+// by the scheduler's own lock - confirm against CoreOpsScheduler usage.
+
+Counter &ScheduledCoreOp::requested_write_frames()
+{
+    return m_requested_write_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::requested_write_frames(const stream_name_t &stream_name)
+{
+    return m_requested_write_frames[stream_name];
+}
+
+Counter &ScheduledCoreOp::finished_write_frames()
+{
+    return m_finished_write_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::finished_write_frames(const stream_name_t &stream_name)
+{
+    return m_finished_write_frames[stream_name];
+}
+
+uint32_t ScheduledCoreOp::finished_write_frames_min_value()
+{
+    return m_finished_write_frames.get_min_value();
+}
+
+Counter &ScheduledCoreOp::h2d_finished_transferred_frames()
+{
+    return m_h2d_finished_transferred_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::h2d_finished_transferred_frames(const stream_name_t &stream_name)
+{
+    return m_h2d_finished_transferred_frames[stream_name];
+}
+
+Counter &ScheduledCoreOp::requested_read_frames()
+{
+    return m_requested_read_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::requested_read_frames(const stream_name_t &stream_name)
+{
+    return m_requested_read_frames[stream_name];
+}
+
+Counter &ScheduledCoreOp::d2h_finished_transferred_frames()
+{
+    return m_d2h_finished_transferred_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::d2h_finished_transferred_frames(const stream_name_t &stream_name)
+{
+    return m_d2h_finished_transferred_frames[stream_name];
+}
+
+Counter &ScheduledCoreOp::finished_read_frames()
+{
+    return m_finished_read_frames;
+}
+
+std::atomic_uint32_t &ScheduledCoreOp::finished_read_frames(const stream_name_t &stream_name)
+{
+    return m_finished_read_frames[stream_name];
+}
+
+uint32_t ScheduledCoreOp::finished_read_frames_min_value()
+{
+    return m_finished_read_frames.get_min_value();
+}
+
+// Stream-name lists populated once in the constructor (stable thereafter).
+const std::vector<stream_name_t> &ScheduledCoreOp::get_inputs_names()
+{
+    return m_inputs_names;
+}
+
+const std::vector<stream_name_t> &ScheduledCoreOp::get_outputs_names()
+{
+    return m_outputs_names;
+}
+
+// Records on every output stream which device will produce the next frame, so
+// reads can later be routed to the right device (see pop_device_index).
+void ScheduledCoreOp::push_device_index(uint32_t device_index)
+{
+    for (auto& stream_name : get_outputs_names()) {
+        m_output_streams_read_orders[stream_name].push(device_index);
+    }
+}
+
+// Pops the device index for the next read on the given stream. The queue must be
+// non-empty (guarded by can_stream_read / the asserts below).
+uint32_t ScheduledCoreOp::pop_device_index(const stream_name_t &stream_name)
+{
+    assert(contains(m_output_streams_read_orders, stream_name));
+    assert(!m_output_streams_read_orders[stream_name].empty());
+    auto device_index = m_output_streams_read_orders[stream_name].front();
+    m_output_streams_read_orders[stream_name].pop();
+
+    return device_index;
+}
+
+// Legacy-scheduling-mode flag (see the TODO on m_ready_to_switch in the header).
+bool ScheduledCoreOp::is_ready_to_switch()
+{
+    return m_ready_to_switch;
+}
+
+void ScheduledCoreOp::mark_ready_to_switch()
+{
+    m_ready_to_switch = true;
+}
+
+void ScheduledCoreOp::mark_unready_to_switch()
+{
+    m_ready_to_switch = false;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduled_core_op_state.hpp
+ * @brief Class declarations for ScheduledCoreOp and its per-stream frame counters - the per-core-op state tracked by the scheduler.
+ **/
+
+#ifndef _HAILO_SCHEDULED_CORE_OP_HPP_
+#define _HAILO_SCHEDULED_CORE_OP_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/network_rate_calculator.hpp"
+
+#include "common/utils.hpp"
+
+#include "core_op/core_op.hpp"
+
+#include <condition_variable>
+#include <queue>
+
+
+namespace hailort
+{
+
+#define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
+#define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
+#define INVALID_DEVICE_ID (UINT32_MAX)
+
+using stream_name_t = std::string;
+using core_op_priority_t = uint8_t;
+
+#define SINGLE_CONTEXT_BATCH_SIZE (1)
+
+// A map of per-stream atomic frame counters keyed by stream name.
+// Streams must be registered with insert() before any other operation;
+// the asserts below enforce this in debug builds.
+class Counter
+{
+public:
+    Counter() : m_map()
+    {};
+
+    // Registers a stream with a zeroed counter. Must not already exist.
+    void insert(const stream_name_t &name)
+    {
+        assert(!contains(m_map, name));
+        m_map[name] = 0;
+    }
+
+    std::atomic_uint32_t &operator [](const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        return m_map[name];
+    }
+
+    void increase(const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        m_map[name]++;
+    }
+
+    // Decrements, saturating at zero (never wraps below 0).
+    void decrease(const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        if (0 != m_map[name]) {
+            m_map[name]--;
+        }
+    }
+
+    uint32_t get_min_value()
+    {
+        return get_min_value_of_unordered_map(m_map);
+    }
+
+    uint32_t get_max_value()
+    {
+        return get_max_value_of_unordered_map(m_map);
+    }
+
+    // True when every registered counter is >= value.
+    bool all_values_bigger_or_equal(uint32_t value)
+    {
+        for (const auto &pair : m_map) {
+            if (value > pair.second) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // True when every counter is zero (note: "empty" means all-zero, not "no streams").
+    bool empty()
+    {
+        for (const auto &pair : m_map) {
+            if (0 != pair.second) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+private:
+    std::unordered_map<stream_name_t, std::atomic_uint32_t> m_map;
+};
+
+// Per-core-op state tracked by the scheduler: frame counters for every stream,
+// timeout/threshold configuration, priority, and per-stream device read ordering.
+class ScheduledCoreOp
+{
+public:
+    static Expected<std::shared_ptr<ScheduledCoreOp>> create(std::shared_ptr<CoreOp> added_core_op, StreamInfoVector &stream_infos);
+
+    virtual ~ScheduledCoreOp() = default;
+    ScheduledCoreOp(const ScheduledCoreOp &other) = delete;
+    ScheduledCoreOp &operator=(const ScheduledCoreOp &other) = delete;
+    ScheduledCoreOp &operator=(ScheduledCoreOp &&other) = delete;
+    ScheduledCoreOp(ScheduledCoreOp &&other) noexcept = delete;
+
+    // Buffer-space and frame-accounting queries used by the scheduler when deciding switches.
+    bool has_enough_space_in_read_buffers(uint32_t ongoing_frames);
+    uint16_t get_min_input_buffers_count(uint32_t device_count);
+    bool has_input_written_most_frames(const std::string &stream_name);
+    std::unordered_map<stream_name_t, uint32_t> total_written_frames_count();
+    bool has_pending_frames();
+    bool can_stream_read(const std::string &stream_name);
+    bool can_stream_write(const std::string &stream_name);
+    bool use_dynamic_batch_flow();
+    bool has_core_op_drained_everything();
+    void decrease_current_core_op_counters();
+    uint32_t get_pre_transfer_h2d_frames_count();
+
+    // Legacy-scheduling-mode switch flag (see TODO on m_ready_to_switch).
+    bool is_ready_to_switch();
+    void mark_ready_to_switch();
+    void mark_unready_to_switch();
+
+    std::string get_core_op_name();
+
+    std::shared_ptr<CoreOp> get_core_op();
+
+    void mark_frame_sent();
+
+    std::chrono::time_point<std::chrono::steady_clock> get_last_run_timestamp();
+    // Fixed: parameter was mojibake ("×tamp") - a mis-encoded "&timestamp" that does not compile.
+    void set_last_run_timestamp(const std::chrono::time_point<std::chrono::steady_clock> &timestamp);
+
+    // Timeout/threshold getters and setters. Setters are only valid before the first frame is sent.
+    Expected<std::chrono::milliseconds> get_timeout(const stream_name_t &stream_name = "");
+    hailo_status set_timeout(const std::chrono::milliseconds &timeout, const stream_name_t &stream_name = "");
+    Expected<uint32_t> get_threshold(const stream_name_t &stream_name);
+    hailo_status set_threshold(uint32_t threshold, const stream_name_t &stream_name = "");
+
+    core_op_priority_t get_priority();
+    void set_priority(core_op_priority_t priority);
+
+    uint32_t get_last_device_index();
+    void set_last_device_index(uint32_t device_index);
+
+    uint16_t get_max_batch_size();
+
+    // Raw counter access (whole Counter, or one stream's atomic).
+    Counter &requested_write_frames();
+    std::atomic_uint32_t &requested_write_frames(const stream_name_t &stream_name);
+    Counter &finished_write_frames();
+    std::atomic_uint32_t &finished_write_frames(const stream_name_t &stream_name);
+    uint32_t finished_write_frames_min_value();
+
+    Counter &h2d_finished_transferred_frames();
+    std::atomic_uint32_t &h2d_finished_transferred_frames(const stream_name_t &stream_name);
+
+    Counter &requested_read_frames();
+    std::atomic_uint32_t &requested_read_frames(const stream_name_t &stream_name);
+
+    Counter &d2h_finished_transferred_frames();
+    std::atomic_uint32_t &d2h_finished_transferred_frames(const stream_name_t &stream_name);
+    Counter &finished_read_frames();
+    std::atomic_uint32_t &finished_read_frames(const stream_name_t &stream_name);
+    uint32_t finished_read_frames_min_value();
+
+    const std::vector<stream_name_t> &get_outputs_names();
+    const std::vector<stream_name_t> &get_inputs_names();
+
+    // True if any output stream uses the HAILO_NMS format order (set in the constructor).
+    bool is_nms()
+    {
+        return m_is_nms;
+    }
+
+    // Routes output reads to the device that produced each frame.
+    void push_device_index(uint32_t device_index);
+    uint32_t pop_device_index(const stream_name_t &stream_name);
+
+    ScheduledCoreOp(std::shared_ptr<CoreOp> core_op, std::chrono::milliseconds timeout,
+        uint16_t max_batch_size, StreamInfoVector &stream_infos, std::string core_op_name);
+
+private:
+    std::shared_ptr<CoreOp> m_core_op;
+
+    std::chrono::time_point<std::chrono::steady_clock> m_last_run_time_stamp;
+    std::chrono::milliseconds m_timeout;
+
+    std::atomic_bool m_frame_was_sent;
+    uint16_t m_max_batch_size;
+
+    Counter m_requested_write_frames; // 'wait_for_write()' has been called
+    Counter m_finished_write_frames; // 'signal_finished_write()' has been called - frame is written in buffer (writes are a-sync)
+
+    Counter m_h2d_finished_transferred_frames; // Frame has been transferred to device (intrpt was raised)
+
+    Counter m_requested_read_frames; // 'wait_for_read()' has been called
+
+    Counter m_d2h_finished_transferred_frames; // Frame has been transferred from device (intrpt was raised)
+    Counter m_finished_read_frames; // 'signal_finish_read()' has been called - user finished getting the frame
+
+    std::unordered_map<stream_name_t, std::atomic_uint32_t> m_min_threshold_per_stream;
+
+    core_op_priority_t m_priority;
+
+    std::atomic_uint32_t m_last_device_index;
+
+    std::string m_core_op_name;
+
+    std::vector<stream_name_t> m_inputs_names;
+    std::vector<stream_name_t> m_outputs_names;
+
+    // Per output stream: FIFO of device indices, pairing each expected frame with its producer device.
+    std::unordered_map<stream_name_t, std::queue<uint32_t>> m_output_streams_read_orders;
+
+    bool m_is_nms;
+
+    // TODO: Remove this flag when the old scheduling mode will be deprecated
+    std::atomic_bool m_ready_to_switch;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULED_CORE_OP_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduled_stream.hpp
+ * @brief Internal stream implementation for scheduled streams
+ *
+ **/
+
+#ifndef HAILO_SCHEDULED_STREAM_HPP_
+#define HAILO_SCHEDULED_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdevice/vdevice_internal.hpp"
+#include "vdevice/vdevice_stream.hpp"
+#include "vdma/vdma_device.hpp"
+
+
+namespace hailort
+{
+
+// Input stream wrapper that defers write scheduling to the CoreOpsScheduler:
+// writes go through the scheduler identified by m_core_op_handle.
+class ScheduledInputStream : public InputVDeviceBaseStream {
+public:
+    ScheduledInputStream(
+        std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
+        const scheduler_core_op_handle_t &core_op_handle,
+        EventPtr &&core_op_activated_event,
+        const LayerInfo &layer_info,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        hailo_status &status) :
+        InputVDeviceBaseStream(std::move(streams), std::move(core_op_activated_event), layer_info, status),
+        m_core_op_handle(core_op_handle),
+        m_core_ops_scheduler(core_ops_scheduler)
+    {}
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual bool is_scheduled() override { return true; };
+
+    // Wakes all waiters: first the scheduler's CVs, then each underlying stream.
+    // The scheduler is held weakly; if it was already destroyed, only log and bail.
+    virtual void notify_all() override
+    {
+        auto scheduler = m_core_ops_scheduler.lock();
+        if (nullptr == scheduler) {
+            LOGGER__CRITICAL("Failed to acquire scheduler");
+            return;
+        }
+        scheduler->notify_all();
+
+        for (auto &stream : m_streams) {
+            stream.get().notify_all();
+        }
+    }
+
+protected:
+    // should_cancel lets a caller abort a pending write mid-wait (defaults to never).
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
+        const std::function<bool()> &should_cancel = []() { return false; });
+
+    Expected<size_t> sync_write_raw_buffer_impl(const MemoryView &buffer, scheduler_core_op_handle_t core_op_handle,
+        const std::function<bool()> &should_cancel);
+
+    scheduler_core_op_handle_t m_core_op_handle;
+    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
+
+private:
+    hailo_status abort_impl(scheduler_core_op_handle_t core_op_handle);
+    hailo_status clear_abort_impl(scheduler_core_op_handle_t core_op_handle);
+};
+
+// Output stream wrapper that defers read scheduling to the CoreOpsScheduler,
+// mirroring ScheduledInputStream for the device-to-host direction.
+class ScheduledOutputStream : public OutputVDeviceBaseStream {
+public:
+    ScheduledOutputStream(
+        std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
+        const scheduler_core_op_handle_t &core_op_handle,
+        const LayerInfo &layer_info,
+        EventPtr &&core_op_activated_event,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        hailo_status &status) :
+        OutputVDeviceBaseStream(std::move(streams), layer_info, std::move(core_op_activated_event), status),
+        m_core_op_handle(core_op_handle),
+        m_core_ops_scheduler(core_ops_scheduler)
+    {}
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual bool is_scheduled() override { return true; };
+
+protected:
+    virtual hailo_status read(MemoryView buffer) override;
+    hailo_status read_impl(MemoryView buffer, scheduler_core_op_handle_t core_op_handle);
+
+    scheduler_core_op_handle_t m_core_op_handle;
+    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
+
+private:
+    hailo_status abort_impl(scheduler_core_op_handle_t core_op_handle);
+    hailo_status clear_abort_impl(scheduler_core_op_handle_t core_op_handle);
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_SCHEDULED_STREAM_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduler_base.hpp
+ * @brief Class declaration for scheduler base class.
+ **/
+
+#ifndef _HAILO_SCHEDULER_BASE_HPP_
+#define _HAILO_SCHEDULER_BASE_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/utils.hpp"
+#include "common/filesystem.hpp"
+
+#include <condition_variable>
+
+
+namespace hailort
+{
+
+#define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
+#define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
+
+#define INVALID_CORE_OP_HANDLE (UINT32_MAX)
+#define INVALID_DEVICE_ID (UINT32_MAX)
+
+using scheduler_core_op_handle_t = uint32_t;
+using core_op_priority_t = uint8_t;
+
+using stream_name_t = std::string;
+
+// Per-device scheduling state: which core-op is currently active, which one is
+// being switched in, and per-cycle frame counters keyed by core-op and stream.
+struct ActiveDeviceInfo {
+    ActiveDeviceInfo(uint32_t device_id, const std::string &device_bdf_id, const std::string &device_arch) :
+        current_core_op_handle(INVALID_CORE_OP_HANDLE), next_core_op_handle(INVALID_CORE_OP_HANDLE), is_switching_core_op(false),
+        current_batch_size(0), current_cycle_requested_transferred_frames_h2d(), current_cycle_finished_transferred_frames_d2h(),
+        current_cycle_finished_read_frames_d2h(), device_id(device_id), device_bdf_id(device_bdf_id), device_arch(device_arch)
+    {}
+    scheduler_core_op_handle_t current_core_op_handle;
+    scheduler_core_op_handle_t next_core_op_handle;
+    std::atomic_bool is_switching_core_op;
+    std::atomic_uint32_t current_batch_size;
+    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_requested_transferred_frames_h2d;
+    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_finished_transferred_frames_d2h;
+    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_finished_read_frames_d2h;
+    uint32_t device_id;
+    std::string device_bdf_id;
+    std::string device_arch;
+};
+
+
+// Abstract base for core-op schedulers: owns the per-device state, the
+// priority->core-op mapping, and the round-robin "last chosen" bookkeeping
+// consumed by CoreOpsSchedulerOracle.
+class SchedulerBase
+{
+public:
+    hailo_scheduling_algorithm_t algorithm()
+    {
+        return m_algorithm;
+    }
+
+    // Result of a readiness check: whether the threshold and/or timeout condition
+    // fired, and the combined verdict.
+    struct ReadyInfo {
+        bool threshold = false;
+        bool timeout = false;
+        bool is_ready = false;
+    };
+
+    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold) = 0;
+    virtual bool has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, uint32_t device_id) = 0;
+
+    virtual uint32_t get_device_count() const
+    {
+        return static_cast<uint32_t>(m_devices.size());
+    }
+
+    // NOTE(review): indexes m_devices without a bounds check - callers must pass a
+    // device_id below get_device_count().
+    virtual std::shared_ptr<ActiveDeviceInfo> get_devices_info(uint32_t device_id)
+    {
+        return m_devices[device_id];
+    }
+
+    // Returns a copy of the priority map (ordered by core_op_priority_t ascending).
+    virtual std::map<core_op_priority_t, std::vector<scheduler_core_op_handle_t>> get_core_op_priority_map()
+    {
+        return m_core_op_priority;
+    }
+
+    // Round-robin bookkeeping per priority level ("choosen" spelling kept - public interface).
+    // NOTE(review): operator[] default-inserts 0 for a priority never set - presumably
+    // intended as the round-robin starting point; verify.
+    virtual scheduler_core_op_handle_t get_last_choosen_core_op(core_op_priority_t priority)
+    {
+        return m_last_choosen_core_op[priority];
+    }
+
+    virtual void set_last_choosen_core_op(const core_op_priority_t priority, const scheduler_core_op_handle_t &core_op_handle)
+    {
+        m_last_choosen_core_op[priority] = core_op_handle;
+    }
+
+protected:
+    // Builds one ActiveDeviceInfo per device; devices_bdf_id/devices_arch must have
+    // at least device_count entries.
+    SchedulerBase(hailo_scheduling_algorithm_t algorithm, uint32_t device_count, std::vector<std::string> &devices_bdf_id,
+        std::vector<std::string> &devices_arch) : m_algorithm(algorithm)
+    {
+        for (uint32_t i = 0; i < device_count; i++) {
+            m_devices.push_back(make_shared_nothrow<ActiveDeviceInfo>(i, devices_bdf_id[i], devices_arch[i]));
+        }
+    };
+
+    virtual ~SchedulerBase() = default;
+    SchedulerBase(const SchedulerBase &other) = delete;
+    SchedulerBase &operator=(const SchedulerBase &other) = delete;
+    SchedulerBase &operator=(SchedulerBase &&other) = delete;
+    SchedulerBase(SchedulerBase &&other) noexcept = delete;
+
+    std::vector<std::shared_ptr<ActiveDeviceInfo>> m_devices;
+    std::map<core_op_priority_t, std::vector<scheduler_core_op_handle_t>> m_core_op_priority;
+
+    hailo_scheduling_algorithm_t m_algorithm;
+    std::unordered_map<core_op_priority_t, scheduler_core_op_handle_t> m_last_choosen_core_op;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULER_BASE_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file scheduler_mon.hpp
+ * @brief Defines for scheduler monitor of networks.
+ **/
+
+#ifndef _HAILO_SCHEDULER_MON_HPP_
+#define _HAILO_SCHEDULER_MON_HPP_
+
+#include "hailo/hailort.h"
+
+#include "common/filesystem.hpp"
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4244 4267 4127)
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#include "scheduler_mon.pb.h"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
+
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <string>
+
+
+namespace hailort
+{
+
+#define SCHEDULER_MON_TMP_DIR ("/tmp/hmon_files/")
+#define SCHEDULER_MON_ENV_VAR ("HAILO_MONITOR")
+#define DEFAULT_SCHEDULER_MON_INTERVAL (std::chrono::seconds(1))
+#define SCHEDULER_MON_NAN_VAL (-1)
+
+// Helper for the scheduler monitor: decides whether monitoring output should be
+// produced for this process.
+class SchedulerMon
+{
+public:
+
+    // True when the HAILO_MONITOR environment variable is set and starts with "1".
+    // Always false off-GCC platforms until HRT-7304 adds Windows support.
+    static bool should_monitor()
+    {
+    #if defined(__GNUC__)
+        // NOTE(review): std::getenv/strncmp require <cstdlib>/<cstring>, which this
+        // header does not include directly - presumably pulled in transitively; verify.
+        auto mon_var = std::getenv(SCHEDULER_MON_ENV_VAR);
+        return (mon_var != nullptr) && strncmp(mon_var, "1", 1) == 0;
+    #else
+        // TODO: HRT-7304 - Add support for windows
+        return false;
+    #endif
+    }
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULER_MON_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduler_oracle.cpp
+ * @brief:
+ **/
+
+#include "vdevice/scheduler/scheduler_oracle.hpp"
+#include "utils/profiler/tracer_macros.hpp"
+
+
+namespace hailort
+{
+
+// Picks the next core-op to run on the given device, scanning priority groups from highest to lowest.
+// Within a priority group the scan starts one past the last chosen core-op (round-robin fairness).
+// On success, marks the device as switching, records the next core-op handle, and returns true.
+bool CoreOpsSchedulerOracle::choose_next_model(SchedulerBase &scheduler, uint32_t device_id, bool check_threshold)
+{
+    auto device_info = scheduler.get_devices_info(device_id);
+    auto priority_map = scheduler.get_core_op_priority_map();
+    for (auto iter = priority_map.rbegin(); iter != priority_map.rend(); ++iter) {
+        auto priority_group_size = iter->second.size();
+
+        for (uint32_t i = 0; i < priority_group_size; i++) {
+            // Round-robin: continue from the element after the last chosen one, wrapping around.
+            uint32_t index = scheduler.get_last_choosen_core_op(iter->first) + i + 1;
+            index %= static_cast<uint32_t>(priority_group_size);
+            auto core_op_handle = iter->second[index];
+            if (!is_core_op_active(scheduler, core_op_handle)) {
+                auto ready_info = scheduler.is_core_op_ready(core_op_handle, check_threshold);
+                if (ready_info.is_ready) {
+                    TRACE(ChooseCoreOpTrace, "", core_op_handle, ready_info.threshold, ready_info.timeout, iter->first);
+                    device_info->is_switching_core_op = true;
+                    device_info->next_core_op_handle = core_op_handle;
+                    scheduler.set_last_choosen_core_op(iter->first, index);
+
+                    return true;
+                }
+            }
+        }
+    }
+
+    return false;
+}
+
+// TODO: return device handle instead index
+// Finds a device that can run the given core-op: first a device already switching to it
+// (or switching with no current core-op and the op is ready), then any fully-drained idle device.
+// Returns INVALID_DEVICE_ID when no device is available.
+uint32_t CoreOpsSchedulerOracle::get_avail_device(SchedulerBase &scheduler, scheduler_core_op_handle_t core_op_handle)
+{
+    const bool check_threshold = false;
+    auto device_count = scheduler.get_device_count();
+
+    // Check if should be next
+    /* Checking (INVALID_CORE_OP_HANDLE == m_current_core_op) for activating the first time the scheduler is running.
+       In this case we don't want to check threshold. */
+    for (uint32_t device_index = 0; device_index < device_count; device_index++) {
+        auto active_device_info = scheduler.get_devices_info(device_index);
+        if (active_device_info->is_switching_core_op && scheduler.has_core_op_drained_everything(active_device_info->current_core_op_handle, active_device_info->device_id) &&
+            (((INVALID_CORE_OP_HANDLE == active_device_info->current_core_op_handle) &&
+            scheduler.is_core_op_ready(core_op_handle, check_threshold).is_ready) ||
+            (active_device_info->next_core_op_handle == core_op_handle))) {
+            return active_device_info->device_id;
+        }
+    }
+
+    // Check if device Idle
+    // We don't need to check if the core op is ready, because the device is idle and if we arrive here the frame was already sent and there is space in the output buffer.
+    for (uint32_t device_index = 0; device_index < device_count; device_index++) {
+        auto active_device_info = scheduler.get_devices_info(device_index);
+        if (!active_device_info->is_switching_core_op && scheduler.has_core_op_drained_everything(active_device_info->current_core_op_handle, active_device_info->device_id)) {
+            return active_device_info->device_id;
+        }
+    }
+
+    return INVALID_DEVICE_ID;
+}
+
+// Returns true when some other inactive core-op of equal-or-higher priority is ready to run,
+// i.e. the currently streaming core-op should yield the device.
+bool CoreOpsSchedulerOracle::should_stop_streaming(SchedulerBase &scheduler, core_op_priority_t core_op_priority)
+{
+    auto priority_map = scheduler.get_core_op_priority_map();
+    // Only priorities >= the current core-op's priority may preempt it.
+    for (auto iter = priority_map.rbegin(); (iter != priority_map.rend()) && (iter->first >= core_op_priority); ++iter) {
+        auto priority_group_size = iter->second.size();
+
+        for (uint32_t i = 0; i < priority_group_size; i++) {
+            // Same round-robin scan order used by choose_next_model.
+            uint32_t index = scheduler.get_last_choosen_core_op(iter->first) + i + 1;
+            index %= static_cast<uint32_t>(priority_group_size);
+            auto core_op_handle = iter->second[index];
+            // We don't want to stay with the same network group if there is another qualified network group
+            if ((!is_core_op_active(scheduler, core_op_handle)) && scheduler.is_core_op_ready(core_op_handle, true).is_ready) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+// Returns true if the given core-op is currently active on any device.
+bool CoreOpsSchedulerOracle::is_core_op_active(SchedulerBase &scheduler, scheduler_core_op_handle_t core_op_handle)
+{
+    auto device_count = scheduler.get_device_count();
+    for (uint32_t device_index = 0; device_index < device_count; device_index++) {
+        auto active_device_info = scheduler.get_devices_info(device_index);
+        if (core_op_handle == active_device_info->current_core_op_handle) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file scheduler_oracle.hpp
+ * @brief
+ **/
+
+#ifndef _HAILO_SCHEDULER_ORACLE_HPP_
+#define _HAILO_SCHEDULER_ORACLE_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/utils.hpp"
+
+#include "vdevice/scheduler/scheduler_base.hpp"
+
+
+namespace hailort
+{
+
+// Stateless decision-making helper for the core-ops scheduler: chooses which core-op runs next,
+// which device is available for a core-op, and when a streaming core-op should yield.
+class CoreOpsSchedulerOracle
+{
+public:
+    static bool choose_next_model(SchedulerBase &scheduler, uint32_t device_id, bool check_threshold);
+    static uint32_t get_avail_device(SchedulerBase &scheduler, scheduler_core_op_handle_t core_op_handle);
+    static bool should_stop_streaming(SchedulerBase &scheduler, core_op_priority_t core_op_priority);
+
+private:
+    CoreOpsSchedulerOracle() {}
+    // TODO: Consider returning a vector of devices (we can use this function in other places)
+    static bool is_core_op_active(SchedulerBase &scheduler, scheduler_core_op_handle_t core_op_handle);
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULER_ORACLE_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice.cpp
+ * @brief TODO: brief
+ *
+ * TODO: doc
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/vdevice.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "vdevice/vdevice_internal.hpp"
+#include "vdevice/vdevice_core_op.hpp"
+
+#include "vdma/pcie/pcie_device.hpp"
+#include "vdma/integrated/integrated_device.hpp"
+#include "utils/shared_resource_manager.hpp"
+#include "network_group/network_group_internal.hpp"
+#include "core_op/core_op.hpp"
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "service/rpc_client_utils.hpp"
+#include "rpc/rpc_definitions.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+
+namespace hailort
+{
+
+// Specialization used by SharedResourceManager: the key under which "unique" (non-grouped)
+// VDevices are registered.
+template<>
+std::string SharedResourceManager<std::string, VDeviceBase>::unique_key()
+{
+    return HAILO_UNIQUE_VDEVICE_GROUP_ID;
+}
+
+// Verifies that the device-id set requested in params is identical to the set used by an
+// existing VDevice of the same group_id. Returns HAILO_INVALID_OPERATION on mismatch.
+static hailo_status validate_device_ids_match(const hailo_vdevice_params_t &params,
+    const std::set<std::string> &old_ids)
+{
+    std::set<std::string> new_ids;
+    for (uint32_t i = 0; i < params.device_count; i++) {
+        // TODO: maybe needs to normalize domain?
+        new_ids.insert(params.device_ids[i].id);
+    }
+
+    CHECK(old_ids == new_ids, HAILO_INVALID_OPERATION, "Different VDevice ids used by group_id {}", (nullptr == params.group_id ? "NULL" : params.group_id));
+    return HAILO_SUCCESS;
+}
+
+// Checks that new creation params are compatible with an already-existing shared VDevice:
+// same device ids (when explicitly requested) and same device count.
+hailo_status validate_same_vdevice(const hailo_vdevice_params_t &params, const VDevice &vdevice)
+{
+    // Validate device ids
+    if (params.device_ids != nullptr) {
+        auto old_ids = vdevice.get_physical_devices_ids();
+        CHECK_EXPECTED_AS_STATUS(old_ids);
+        std::set<std::string> old_ids_set(old_ids->begin(), old_ids->end());
+
+        auto status = validate_device_ids_match(params, old_ids_set);
+        CHECK_SUCCESS(status);
+    }
+
+    // Validate count matches
+    auto physical_devices = vdevice.get_physical_devices();
+    CHECK_EXPECTED_AS_STATUS(physical_devices);
+    CHECK(params.device_count == physical_devices->size(), HAILO_INVALID_OPERATION,
+        "Different VDevice device count used by group_id {}", params.group_id);
+    return HAILO_SUCCESS;
+}
+
+// Releases the shared VDeviceBase resource registered under 'key' when 'condition' holds.
+// Used to undo a registration after a failed validation.
+void release_resource_if(bool condition, uint32_t key) {
+    if (condition) {
+        SharedResourceManager<std::string, VDeviceBase>::get_instance().release_resource(key);
+    }
+}
+
+// Builds default configure params for every network group in the HEF, using this
+// VDevice's default stream interface.
+Expected<NetworkGroupsParamsMap> VDevice::create_configure_params(Hef &hef) const
+{
+    auto stream_interface = get_default_streams_interface();
+    CHECK_EXPECTED(stream_interface, "Failed to get default streams interface");
+
+    return hef.create_configure_params(stream_interface.release());
+}
+
+// Same as the map overload, but builds configure params for a single named network group.
+Expected<ConfigureNetworkParams> VDevice::create_configure_params(Hef &hef, const std::string &network_group_name) const
+{
+    auto stream_interface = get_default_streams_interface();
+    CHECK_EXPECTED(stream_interface, "Failed to get default streams interface");
+
+    return hef.create_configure_params(stream_interface.release(), network_group_name);
+}
+
+// Wraps a handle into the process-wide SharedResourceManager of VDeviceBase instances.
+VDeviceHandle::VDeviceHandle(uint32_t handle) : m_handle(handle)
+{}
+
+// Drops this owner's reference to the shared VDeviceBase (RAII release of the managed resource).
+VDeviceHandle::~VDeviceHandle()
+{
+    SharedResourceManager<std::string, VDeviceBase>::get_instance().release_resource(m_handle);
+}
+
+// Creates (or attaches to) a shared VDeviceBase keyed by group_id, validates that the new
+// params match any pre-existing instance, and returns a handle-owning VDevice wrapper.
+// On validation failure the freshly taken reference is released before returning the error.
+Expected<std::unique_ptr<VDevice>> VDeviceHandle::create(const hailo_vdevice_params_t &params)
+{
+    auto status = VDeviceBase::validate_params(params);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto create = [&params]() {
+        return VDeviceBase::create(params);
+    };
+    // Empty key means a non-grouped (unique) VDevice.
+    auto expected_handle = manager.register_resource(params.group_id == nullptr ? "" : std::string(params.group_id), create);
+    CHECK_EXPECTED(expected_handle);
+
+    auto expected_vdevice_base = manager.resource_lookup(expected_handle.value());
+    CHECK_EXPECTED(expected_vdevice_base);
+
+    auto same_vdevice_status = validate_same_vdevice(params, *expected_vdevice_base.value());
+    release_resource_if(same_vdevice_status != HAILO_SUCCESS, expected_handle.value());
+    CHECK_SUCCESS_AS_EXPECTED(same_vdevice_status);
+
+    auto handle_vdevice = std::unique_ptr<VDeviceHandle>(new VDeviceHandle(expected_handle.value()));
+    CHECK_AS_EXPECTED(handle_vdevice != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return std::unique_ptr<VDevice>(std::move(handle_vdevice));
+}
+
+// Resolves the shared VDeviceBase behind m_handle and forwards configure() to it.
+Expected<ConfiguredNetworkGroupVector> VDeviceHandle::configure(Hef &hef,
+    const NetworkGroupsParamsMap &configure_params)
+{
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto vdevice = manager.resource_lookup(m_handle);
+    CHECK_EXPECTED(vdevice);
+
+    return vdevice.value()->configure(hef, configure_params);
+}
+
+// Resolves the shared VDeviceBase behind m_handle and forwards get_physical_devices() to it.
+Expected<std::vector<std::reference_wrapper<Device>>> VDeviceHandle::get_physical_devices() const
+{
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto vdevice = manager.resource_lookup(m_handle);
+    CHECK_EXPECTED(vdevice);
+
+    return vdevice.value()->get_physical_devices();
+}
+
+// Resolves the shared VDeviceBase behind m_handle and forwards get_physical_devices_ids() to it.
+Expected<std::vector<std::string>> VDeviceHandle::get_physical_devices_ids() const
+{
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto vdevice = manager.resource_lookup(m_handle);
+    CHECK_EXPECTED(vdevice);
+
+    return vdevice.value()->get_physical_devices_ids();
+}
+
+// Resolves the shared VDeviceBase behind m_handle and forwards get_default_streams_interface() to it.
+Expected<hailo_stream_interface_t> VDeviceHandle::get_default_streams_interface() const
+{
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto vdevice = manager.resource_lookup(m_handle);
+    CHECK_EXPECTED(vdevice);
+
+    return vdevice.value()->get_default_streams_interface();
+}
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+
+// Client-side proxy over a VDevice living in the HailoRT service process (gRPC-backed).
+VDeviceClient::VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle)
+    : m_client(std::move(client))
+    , m_handle(handle)
+{}
+
+VDeviceClient::~VDeviceClient()
+{
+    // Note: We clear m_network_groups to prevent double destruction on ConfiguredNetworkGroupBase.
+    // Explanation: When the VDeviceClient is destructed, its members are destructed last.
+    // That would cause the m_network_groups (vector of ConfiguredNetworkGroupClient) to be destructed after the vdevice in the service.
+    // The vdevice in the service will destruct the ConfiguredNetworkGroupBase,
+    // and then the ConfiguredNetworkGroupClient destructor will be called - causing double destruction on ConfiguredNetworkGroupBase.
+    m_network_groups.clear();
+    auto reply = m_client->VDevice_release(m_handle);
+    if (reply != HAILO_SUCCESS) {
+        LOGGER__CRITICAL("VDevice_release failed!");
+    }
+}
+
+// fork() handler: drops the gRPC client before forking (channels cannot survive a fork).
+hailo_status VDeviceClient::before_fork()
+{
+    HailoRtRpcClientUtils::get_instance().before_fork();
+    m_client.reset();
+    return HAILO_SUCCESS;
+}
+
+// (Re)creates the gRPC client channel to the HailoRT service; unlimited receive message size.
+hailo_status VDeviceClient::create_client()
+{
+    grpc::ChannelArguments ch_args;
+    ch_args.SetMaxReceiveMessageSize(-1);
+    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    CHECK_NOT_NULL(channel, HAILO_INTERNAL_FAILURE);
+    auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+    CHECK_NOT_NULL(client, HAILO_INTERNAL_FAILURE);
+    m_client = std::move(client);
+    return HAILO_SUCCESS;
+}
+
+// fork() handler (parent side): re-establishes the gRPC channel; the handle stays valid.
+hailo_status VDeviceClient::after_fork_in_parent()
+{
+    HailoRtRpcClientUtils::get_instance().after_fork_in_parent();
+    return create_client();
+}
+
+// fork() handler (child side): re-creates the channel and duplicates the service-side
+// VDevice handle for the child's pid, replacing m_handle.
+hailo_status VDeviceClient::after_fork_in_child()
+{
+    HailoRtRpcClientUtils::get_instance().after_fork_in_child();
+    auto status = create_client();
+    CHECK_SUCCESS(status);
+    auto expected_dup_handle = m_client->VDevice_dup_handle(OsUtils::get_curr_pid(), m_handle);
+    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
+    m_handle = expected_dup_handle.value();
+    return HAILO_SUCCESS;
+}
+
+// Creates a service-backed VDevice: opens a gRPC channel, initializes client/service
+// communication, asks the service to create the VDevice, and wraps the returned handle.
+Expected<std::unique_ptr<VDevice>> VDeviceClient::create(const hailo_vdevice_params_t &params)
+{
+    grpc::ChannelArguments ch_args;
+    ch_args.SetMaxReceiveMessageSize(-1);
+    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
+
+    auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
+    CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+    auto init_status = HailoRtRpcClientUtils::get_instance().init_client_service_communication();
+    CHECK_SUCCESS_AS_EXPECTED(init_status);
+    auto reply = client->VDevice_create(params, OsUtils::get_curr_pid());
+    CHECK_EXPECTED(reply);
+
+    auto client_vdevice = std::unique_ptr<VDeviceClient>(new VDeviceClient(std::move(client), reply.value()));
+    CHECK_AS_EXPECTED(client_vdevice != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return std::unique_ptr<VDevice>(std::move(client_vdevice));
+}
+
+// Configures the HEF on the service-side VDevice and wraps each returned network-group
+// handle in a ConfiguredNetworkGroupClient proxy (each with its own RPC client).
+// The proxies are also cached in m_network_groups (see destructor note on destruction order).
+Expected<ConfiguredNetworkGroupVector> VDeviceClient::configure(Hef &hef,
+    const NetworkGroupsParamsMap &configure_params)
+{
+    auto networks_handles = m_client->VDevice_configure(m_handle, hef, OsUtils::get_curr_pid(), configure_params);
+    CHECK_EXPECTED(networks_handles);
+
+    ConfiguredNetworkGroupVector networks;
+    networks.reserve(networks_handles->size());
+    for (auto &handle : networks_handles.value()) {
+        auto expected_client = HailoRtRpcClientUtils::create_client();
+        CHECK_EXPECTED(expected_client);
+
+        auto client = expected_client.release();
+        auto network_group = make_shared_nothrow<ConfiguredNetworkGroupClient>(std::move(client), handle);
+        CHECK_NOT_NULL_AS_EXPECTED(network_group, HAILO_OUT_OF_HOST_MEMORY);
+
+        networks.emplace_back(network_group);
+        m_network_groups.push_back(network_group);
+    }
+    return networks;
+}
+
+// Physical device references cannot cross the process boundary, so this is unsupported in service mode.
+// NOTE(review): the log message names ConfiguredNetworkGroup but this is VDevice::get_physical_devices - likely a copy-paste slip (runtime string, left unchanged here).
+Expected<std::vector<std::reference_wrapper<Device>>> VDeviceClient::get_physical_devices() const
+{
+    LOGGER__ERROR("ConfiguredNetworkGroup::get_physical_devices function is not supported when using multi-process service");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+// Forwards the query to the service over RPC.
+Expected<std::vector<std::string>> VDeviceClient::get_physical_devices_ids() const
+{
+    return m_client->VDevice_get_physical_devices_ids(m_handle);
+}
+
+// Forwards the query to the service over RPC.
+Expected<hailo_stream_interface_t> VDeviceClient::get_default_streams_interface() const
+{
+    return m_client->VDevice_get_default_streams_interface(m_handle);
+}
+
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+
+// Factory entry point: dispatches to the service-backed client (multi-process) or to the
+// in-process shared-handle implementation, based on params.multi_process_service.
+Expected<std::unique_ptr<VDevice>> VDevice::create(const hailo_vdevice_params_t &params)
+{
+    std::unique_ptr<VDevice> vdevice;
+    if (params.multi_process_service) {
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+        auto expected_vdevice = VDeviceClient::create(params);
+        CHECK_EXPECTED(expected_vdevice);
+        vdevice = expected_vdevice.release();
+#else
+        LOGGER__ERROR("multi_process_service requires service compilation with HAILO_BUILD_SERVICE");
+        return make_unexpected(HAILO_INVALID_OPERATION);
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+    } else {
+        auto expected_vdevice = VDeviceHandle::create(params);
+        CHECK_EXPECTED(expected_vdevice);
+        vdevice = expected_vdevice.release();
+    }
+    // Upcasting to VDevice unique_ptr
+    // NOTE(review): 'vdevice' is already a unique_ptr<VDevice>, so this release/re-wrap looks redundant - confirm before simplifying.
+    auto vdevice_ptr = std::unique_ptr<VDevice>(vdevice.release());
+    return vdevice_ptr;
+}
+
+// Convenience overload: creates a VDevice with the library default parameters.
+Expected<std::unique_ptr<VDevice>> VDevice::create()
+{
+    auto params = HailoRTDefaults::get_vdevice_params();
+    return create(params);
+}
+
+// Convenience overload: default params restricted to the given device-id strings.
+// The id vector is local; params only borrows it for the duration of the create(params) call.
+Expected<std::unique_ptr<VDevice>> VDevice::create(const std::vector<std::string> &device_ids)
+{
+    auto params = HailoRTDefaults::get_vdevice_params();
+
+    auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
+    CHECK_EXPECTED(device_ids_vector);
+
+    params.device_ids = device_ids_vector->data();
+    params.device_count = static_cast<uint32_t>(device_ids_vector->size());
+
+    return create(params);
+}
+
+// Validates creation params: non-zero device count, and for Ethernet devices enforces
+// single-device usage with the scheduler disabled.
+hailo_status VDeviceBase::validate_params(const hailo_vdevice_params_t &params)
+{
+    CHECK(0 != params.device_count, HAILO_INVALID_ARGUMENT,
+        "VDevice creation failed. invalid device_count ({}).", params.device_count);
+
+    if (params.device_ids != nullptr) {
+        for (uint32_t i = 0; i < params.device_count; i++) {
+            auto dev_type = Device::get_device_type(params.device_ids[i].id);
+            CHECK_EXPECTED_AS_STATUS(dev_type);
+            CHECK((Device::Type::ETH != dev_type.value() || (1 == params.device_count)), HAILO_INVALID_ARGUMENT,
+                "VDevice over ETH is supported for 1 device. Passed device_count: {}", params.device_count);
+            CHECK((Device::Type::ETH != dev_type.value() || (HAILO_SCHEDULING_ALGORITHM_NONE == params.scheduling_algorithm)), HAILO_INVALID_ARGUMENT,
+                "VDevice over ETH is not supported when scheduler is enabled.");
+        }
+    }
+    return HAILO_SUCCESS;
+}
+
+// Creates the in-process VDevice: opens the physical devices, collects their ids and
+// architectures, optionally builds the round-robin core-ops scheduler, and assembles VDeviceBase.
+// Only HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN (or NONE) is currently supported.
+Expected<std::unique_ptr<VDeviceBase>> VDeviceBase::create(const hailo_vdevice_params_t &params)
+{
+    auto devices_expected = create_devices(params);
+    CHECK_EXPECTED(devices_expected);
+    auto devices = devices_expected.release();
+
+    std::vector<std::string> device_ids;
+    device_ids.reserve(params.device_count);
+    std::vector<std::string> device_archs;
+    device_archs.reserve(params.device_count);
+
+    std::string vdevice_ids = "VDevice Infos:";
+    for (const auto &device : devices) {
+        auto id_info_str = device->get_dev_id();
+        device_ids.emplace_back(id_info_str);
+        auto device_arch = device->get_architecture();
+        CHECK_EXPECTED(device_arch);
+        auto device_arch_str = HailoRTCommon::get_device_arch_str(device_arch.value());
+        device_archs.emplace_back(device_arch_str);
+        vdevice_ids += " " + std::string(id_info_str);
+    }
+    LOGGER__INFO("{}", vdevice_ids);
+
+
+    CoreOpsSchedulerPtr scheduler_ptr;
+    if (HAILO_SCHEDULING_ALGORITHM_NONE != params.scheduling_algorithm) {
+        if (HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN == params.scheduling_algorithm) {
+            auto core_ops_scheduler = CoreOpsScheduler::create_round_robin(params.device_count, device_ids, device_archs);
+            CHECK_EXPECTED(core_ops_scheduler);
+            scheduler_ptr = core_ops_scheduler.release();
+        } else {
+            LOGGER__ERROR("Unsupported scheduling algorithm");
+            return make_unexpected(HAILO_INVALID_ARGUMENT);
+        }
+    }
+
+    auto vdevice = std::unique_ptr<VDeviceBase>(new (std::nothrow) VDeviceBase(std::move(devices), scheduler_ptr));
+    CHECK_AS_EXPECTED(nullptr != vdevice, HAILO_OUT_OF_HOST_MEMORY);
+
+    return vdevice;
+}
+
+// Configures a HEF on the VDevice. When the scheduler and multiplexer are enabled, an
+// already-configured identical single-input core-op is duplicated (stream multiplexing)
+// instead of re-configuring the devices. Thread-safe via m_mutex.
+// NOTE(review): "netwrok" in the local variable names is a misspelling of "network" (code tokens left unchanged here).
+Expected<ConfiguredNetworkGroupVector> VDeviceBase::configure(Hef &hef,
+    const NetworkGroupsParamsMap &configure_params)
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+    auto start_time = std::chrono::steady_clock::now();
+
+    auto local_config_params = create_local_config_params(hef, configure_params);
+    CHECK_EXPECTED(local_config_params);
+
+    ConfiguredNetworkGroupVector added_network_groups;
+    // NOTE(review): reserve uses configure_params.size(), but the loop iterates local_config_params,
+    // which may be larger when configure_params was empty - harmless, but worth confirming.
+    added_network_groups.reserve(configure_params.size());
+
+    for (const auto &network_params_pair : local_config_params.value()) {
+        std::vector<std::shared_ptr<CoreOp>> core_ops;
+        std::shared_ptr<VDeviceCoreOp> identical_core_op = nullptr;
+        if (m_core_ops_scheduler && PipelineMultiplexer::should_use_multiplexer()) {
+            for (auto &network_group : m_vdevice_core_ops) {
+                if ((network_group->equals(hef, network_params_pair)) && (1 == network_group->get_input_streams().size())) {
+                    // TODO (HRT-8634): Support multi-inputs NGs (multi networks)
+                    identical_core_op = network_group;
+                    break;
+                }
+            }
+        }
+        std::shared_ptr<VDeviceCoreOp> vdevice_netwrok_group = nullptr;
+        if (identical_core_op) {
+            auto vdevice_netwrok_group_exp = VDeviceCoreOp::duplicate(identical_core_op);
+            CHECK_EXPECTED(vdevice_netwrok_group_exp);
+
+            vdevice_netwrok_group = vdevice_netwrok_group_exp.release();
+            vdevice_netwrok_group->set_core_op_handle(identical_core_op->core_op_handle());
+            // NOTE(review): the status returned by create_vdevice_streams_from_duplicate is ignored - confirm intentional.
+            vdevice_netwrok_group->create_vdevice_streams_from_duplicate(identical_core_op);
+        } else {
+            auto vdevice_netwrok_group_expected = create_vdevice_network_group(hef, network_params_pair);
+            CHECK_EXPECTED(vdevice_netwrok_group_expected);
+            vdevice_netwrok_group = vdevice_netwrok_group_expected.release();
+            m_vdevice_core_ops.push_back(vdevice_netwrok_group);
+        }
+
+        core_ops.push_back(vdevice_netwrok_group);
+        auto net_flow_ops = hef.pimpl->post_process_ops(vdevice_netwrok_group->name());
+        auto net_group_expected = ConfiguredNetworkGroupBase::create(network_params_pair.second, std::move(core_ops), std::move(net_flow_ops));
+        CHECK_EXPECTED(net_group_expected);
+        auto network_group_ptr = net_group_expected.release();
+
+        added_network_groups.push_back(network_group_ptr);
+        m_network_groups.push_back(network_group_ptr);
+    }
+
+    auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
+    LOGGER__INFO("Configuring HEF on VDevice took {} milliseconds", elapsed_time_ms);
+
+    return added_network_groups;
+}
+
+// Returns the common default stream interface of all underlying devices; fails if the
+// devices are not homogeneous in this respect.
+Expected<hailo_stream_interface_t> VDeviceBase::get_default_streams_interface() const
+{
+    auto stream_interface = m_devices[0]->get_default_streams_interface();
+    CHECK_EXPECTED(stream_interface);
+    for (auto &dev : m_devices) {
+        auto current_stream_interface = dev->get_default_streams_interface();
+        CHECK_EXPECTED(current_stream_interface);
+        CHECK_AS_EXPECTED(*current_stream_interface == *stream_interface, HAILO_INTERNAL_FAILURE,
+            "vDevice is supported only with homogeneous device type");
+    }
+    return stream_interface.release();
+}
+
+// Opens up to params.device_count physical devices. When the user did not specify ids,
+// devices already in use are skipped; with explicit ids an in-use device is an error.
+// Multi-device VDevices reject HAILO8L devices. Fails if not enough free devices were found.
+Expected<std::vector<std::unique_ptr<Device>>> VDeviceBase::create_devices(const hailo_vdevice_params_t &params)
+{
+    std::vector<std::unique_ptr<Device>> devices;
+    devices.reserve(params.device_count);
+
+    const bool user_specific_devices = (params.device_ids != nullptr);
+
+    auto device_ids = get_device_ids(params);
+    CHECK_EXPECTED(device_ids);
+
+    for (const auto &device_id : device_ids.value()) {
+        if (devices.size() == params.device_count) {
+            break;
+        }
+        auto device = Device::create(device_id);
+        CHECK_EXPECTED(device);
+
+        // Validate That if (device_count != 1), device arch is not H8L. May be changed in SDK-28729
+        if (1 != params.device_count) {
+            auto device_arch = device.value()->get_architecture();
+            CHECK_EXPECTED(device_arch);
+            CHECK_AS_EXPECTED(HAILO_ARCH_HAILO8L != device_arch.value(), HAILO_INVALID_OPERATION,
+                "VDevice with multiple devices is not supported on HAILO_ARCH_HAILO8L. device {} is HAILO_ARCH_HAILO8L", device_id);
+        }
+
+        auto dev_type = Device::get_device_type(device_id);
+        CHECK_EXPECTED(dev_type);
+        if ((Device::Type::INTEGRATED == dev_type.value()) || (Device::Type::PCIE == dev_type.value())) {
+            auto status = dynamic_cast<VdmaDevice&>(*device.value()).mark_as_used();
+            if (!user_specific_devices && (HAILO_DEVICE_IN_USE == status)) {
+                // Continue only if the user didn't ask for specific devices
+                continue;
+            }
+            CHECK_SUCCESS_AS_EXPECTED(status);
+        }
+        devices.emplace_back(device.release());
+    }
+    CHECK_AS_EXPECTED(params.device_count == devices.size(), HAILO_OUT_OF_PHYSICAL_DEVICES,
+        "Failed to create vdevice. there are not enough free devices. requested: {}, found: {}",
+        params.device_count, devices.size());
+
+    return devices;
+}
+
+// Returns the candidate device ids: the user-provided list when given, otherwise the
+// result of a full device scan.
+Expected<std::vector<std::string>> VDeviceBase::get_device_ids(const hailo_vdevice_params_t &params)
+{
+    if (params.device_ids == nullptr) {
+        // Use device scan pool
+        return Device::scan();
+    }
+    else {
+        std::vector<std::string> device_ids;
+        device_ids.reserve(params.device_count);
+
+        for (size_t i = 0; i < params.device_count; i++) {
+            device_ids.emplace_back(params.device_ids[i].id);
+        }
+
+        return device_ids;
+    }
+}
+
+// Builds the effective configure params: checks HEF compatibility on every device, fills
+// defaults when the caller passed an empty map, and - when the scheduler is enabled -
+// enforces an identical batch_size across all networks.
+Expected<NetworkGroupsParamsMap> VDeviceBase::create_local_config_params(Hef &hef, const NetworkGroupsParamsMap &configure_params)
+{
+    for (auto &device : m_devices) {
+        auto status = dynamic_cast<DeviceBase&>(*device).check_hef_is_compatible(hef);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
+    auto local_config_params = configure_params;
+    if (local_config_params.empty()) {
+        // All stream iface should be the same
+        auto config_params_exp = m_devices[0]->create_configure_params(hef);
+        CHECK_EXPECTED(config_params_exp);
+        local_config_params = config_params_exp.release();
+    }
+
+    /* Validate batch size is identical for all networks in case scheduler is enabled */
+    if (m_core_ops_scheduler) {
+        // UINT16_MAX is used as a "not yet seen" sentinel for the reference batch size.
+        uint16_t ref_batch_size = UINT16_MAX;
+        for (const auto &ng_params_pair : local_config_params) {
+            for (const auto &network_params_pair : ng_params_pair.second.network_params_by_name) {
+                if (UINT16_MAX == ref_batch_size) {
+                    ref_batch_size = network_params_pair.second.batch_size;
+                }
+                CHECK_AS_EXPECTED(ref_batch_size == network_params_pair.second.batch_size, HAILO_INVALID_OPERATION,
+                    "When scheduler is enabled, all networks should have the same batch_size. configure_params contains {} and {}. "
+                    "To disable scheduler, set HAILO_SCHEDULING_ALGORITHM_NONE in VDevice creation.", ref_batch_size, network_params_pair.second.batch_size);
+            }
+        }
+    }
+
+    return local_config_params;
+}
+
+// Configures the network group on every physical device, bundles the per-device core-ops
+// into one VDeviceCoreOp, registers it with the scheduler (when enabled), and creates its
+// vdevice-level streams. NOTE(review): "netwrok" in the local names is a misspelling of "network".
+Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceBase::create_vdevice_network_group(Hef &hef, const std::pair<const std::string, ConfigureNetworkParams> &params)
+{
+    std::vector<std::shared_ptr<CoreOp>> core_ops_bundle; // bundle of the same CoreOps for all devices
+    core_ops_bundle.reserve(m_devices.size());
+
+    // configure all the devices to this ng and then push the core ops to bundle vector
+    for (auto &device : m_devices) {
+        auto ng_vector = device->configure(hef, { std::make_pair(params.first, params.second) });
+        CHECK_EXPECTED(ng_vector);
+
+        assert(1 == ng_vector->size());
+        auto network_group_base = std::dynamic_pointer_cast<ConfiguredNetworkGroupBase>(ng_vector.value()[0]);
+        auto ng_core_ops = network_group_base->get_core_ops();
+
+        core_ops_bundle.insert(core_ops_bundle.begin(), ng_core_ops.begin(), ng_core_ops.end());
+    }
+
+    auto vdevice_netwrok_group_exp = VDeviceCoreOp::create(core_ops_bundle, m_core_ops_scheduler, hef.hash());
+    CHECK_EXPECTED(vdevice_netwrok_group_exp);
+    auto vdevice_netwrok_group = vdevice_netwrok_group_exp.release();
+
+    // Without a scheduler the core-op keeps an invalid handle.
+    auto ng_handle = INVALID_CORE_OP_HANDLE;
+    if (m_core_ops_scheduler) {
+        auto core_op_handle_exp = m_core_ops_scheduler->add_core_op(vdevice_netwrok_group);
+        CHECK_EXPECTED(core_op_handle_exp);
+        ng_handle = core_op_handle_exp.release();
+    }
+    vdevice_netwrok_group->set_core_op_handle(ng_handle);
+    // NOTE(review): make_shared_nothrow may return null; the multiplexer pointer is passed unchecked - confirm downstream handling.
+    auto status = vdevice_netwrok_group->create_vdevice_streams_from_config_params(make_shared_nothrow<PipelineMultiplexer>(), ng_handle);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return vdevice_netwrok_group;
+}
+
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_core_op.cpp
+ * @brief: VDeviceCoreOp implementation
+ **/
+
+#include "vdevice/vdevice_core_op.hpp"
+#include "vdevice/vdevice_stream.hpp"
+#include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
+#include "net_flow/pipeline/vstream_internal.hpp"
+#include "utils/profiler/tracer_macros.hpp"
+
+
+namespace hailort
+{
+
+// Activates the core-op on every underlying device, wraps the per-device activations in a
+// single VDeviceActivatedCoreOp, and signals the activation event on success.
+// 'status' is an out-parameter of the wrapped constructor and is checked after construction.
+Expected<std::unique_ptr<ActivatedNetworkGroup>> VDeviceActivatedCoreOp::create(
+    std::vector<std::shared_ptr<CoreOp>> &core_ops,
+    std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+    std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+    const hailo_activate_network_group_params_t &network_group_params,
+    EventPtr core_op_activated_event, uint16_t dynamic_batch_size,
+    AccumulatorPtr deactivation_time_accumulator,
+    bool resume_pending_stream_transfers)
+{
+    auto status = HAILO_UNINITIALIZED;
+    std::vector<std::unique_ptr<ActivatedNetworkGroup>> activated_network_groups;
+    activated_network_groups.reserve(core_ops.size());
+    for (auto core_op : core_ops) {
+        auto ang = core_op->create_activated_network_group(network_group_params, dynamic_batch_size,
+            resume_pending_stream_transfers);
+        CHECK_EXPECTED(ang);
+        activated_network_groups.emplace_back(ang.release());
+    }
+    auto ang = VDeviceActivatedCoreOp(std::move(activated_network_groups), input_streams, output_streams,
+        network_group_params, core_op_activated_event, deactivation_time_accumulator, status);
+
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
+        make_unique_nothrow<VDeviceActivatedCoreOp>(std::move(ang));
+    CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    status = core_op_activated_event->signal();
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed to signal network activation event");
+
+    return activated_net_group_ptr;
+}
+
+// Returns the activated network group's name, taken from the first device's activation.
+const std::string &VDeviceActivatedCoreOp::get_network_group_name() const
+{
+    // network_group_name is same across all NGs
+    return m_activated_network_groups[0]->get_network_group_name();
+}
+
+// Fetches an intermediate buffer; only meaningful when exactly one device is active.
+Expected<Buffer> VDeviceActivatedCoreOp::get_intermediate_buffer(const IntermediateBufferKey &key)
+{
+    CHECK_AS_EXPECTED(1 == m_activated_network_groups.size(), HAILO_INVALID_OPERATION, "getting intermediate buffer is supported only over single device");
+    return m_activated_network_groups[0]->get_intermediate_buffer(key);
+}
+
+// Propagates the keep-nn-config-during-reset flag to every per-device activation;
+// stops at the first failure.
+hailo_status VDeviceActivatedCoreOp::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
+{
+    for (auto &activated_network_group : m_activated_network_groups) {
+        auto status = activated_network_group->set_keep_nn_config_during_reset(keep_nn_config_during_reset);
+        CHECK_SUCCESS(status);
+    }
+    return HAILO_SUCCESS;
+}
+
+// Private constructor; reports construction failures through the 'status' out-parameter
+// (forwarded to the ActivatedCoreOp base) rather than throwing.
+VDeviceActivatedCoreOp::VDeviceActivatedCoreOp(std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
+    std::map<std::string, std::shared_ptr<InputStream>> &input_streams, std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+    const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event, AccumulatorPtr deactivation_time_accumulator, hailo_status &status)
+    : ActivatedCoreOp(network_group_params, input_streams, output_streams, std::move(core_op_activated_event), status),
+      m_activated_network_groups(std::move(activated_network_groups)), m_should_reset_core_op(true), m_deactivation_time_accumulator(deactivation_time_accumulator)
+{
+}
+
+// Move constructor: std::exchange clears the source's reset flag so only the surviving
+// object resets the core-op on destruction.
+VDeviceActivatedCoreOp::VDeviceActivatedCoreOp(VDeviceActivatedCoreOp &&other) noexcept :
+    ActivatedCoreOp(std::move(other)),
+    m_activated_network_groups(std::move(other.m_activated_network_groups)),
+    m_should_reset_core_op(std::exchange(other.m_should_reset_core_op, false)),
+    m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator))
+{
+}
+
+
+// Factory: builds a VDeviceCoreOp over the per-device core-ops; construction errors are
+// reported through the 'status' out-parameter and converted to an unexpected result.
+Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::create(std::vector<std::shared_ptr<CoreOp>> core_ops,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash)
+{
+    auto status = HAILO_UNINITIALIZED;
+
+    VDeviceCoreOp object(std::move(core_ops), core_ops_scheduler, hef_hash, status);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto obj_ptr = make_shared_nothrow<VDeviceCoreOp>(std::move(object));
+    CHECK_NOT_NULL_AS_EXPECTED(obj_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return obj_ptr;
+}
+
+// Builds a new VDeviceCoreOp that shares the same underlying per-device core-ops,
+// scheduler and HEF hash as `other` (used when duplicating over the multiplexer).
+Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::duplicate(std::shared_ptr<VDeviceCoreOp> other)
+{
+    hailo_status ctor_status = HAILO_UNINITIALIZED;
+    auto core_ops_copy = other->m_core_ops;
+    VDeviceCoreOp duplicated(std::move(core_ops_copy), other->m_core_ops_scheduler, other->m_hef_hash, ctor_status);
+    CHECK_SUCCESS_AS_EXPECTED(ctor_status);
+
+    auto result = make_shared_nothrow<VDeviceCoreOp>(std::move(duplicated));
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    return result;
+}
+
+
+// Private ctor. Note: the CoreOp base is initialized from core_ops[0] before
+// m_core_ops takes ownership via std::move (base initializers run first), so the
+// access is safe. Assumes core_ops is non-empty -- TODO confirm all callers guarantee this.
+VDeviceCoreOp::VDeviceCoreOp(std::vector<std::shared_ptr<CoreOp>> core_ops,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash, hailo_status &status) :
+    CoreOp(core_ops[0]->m_config_params, core_ops[0]->m_metadata, status),
+    m_core_ops(std::move(core_ops)),
+    m_core_ops_scheduler(core_ops_scheduler),
+    m_scheduler_handle(INVALID_CORE_OP_HANDLE),
+    m_multiplexer_handle(0),
+    m_multiplexer(),
+    m_hef_hash(hef_hash)
+{}
+
+// Returns the default stream interface of the first physical device. In debug builds
+// also verifies every device reports the same interface.
+Expected<hailo_stream_interface_t> VDeviceCoreOp::get_default_streams_interface()
+{
+    auto first_streams_interface = m_core_ops[0]->get_default_streams_interface();
+    CHECK_EXPECTED(first_streams_interface);
+#ifndef NDEBUG
+    // Check that all physical devices have the same interface
+    for (auto &core_op : m_core_ops) {
+        auto iface = core_op->get_default_streams_interface();
+        CHECK_EXPECTED(iface);
+        CHECK_AS_EXPECTED(iface.value() == first_streams_interface.value(), HAILO_INTERNAL_FAILURE,
+            "Not all default stream interfaces are the same");
+    }
+#endif
+    return first_streams_interface;
+}
+
+// Creates this core-op's vdevice-level input/output streams according to the configured
+// stream parameters, registers this instance with the multiplexer, and emits scheduler
+// traces describing each stream's queue size.
+hailo_status VDeviceCoreOp::create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle)
+{
+    // TODO - HRT-6931 - raise error on this case
+    if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_core_ops.size())) {
+        LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
+    }
+
+    m_multiplexer = multiplexer;
+
+    // Create one vdevice stream per configured stream, dispatched by direction.
+    for (const auto &stream_parameters_pair : m_config_params.stream_params_by_name) {
+        switch (stream_parameters_pair.second.direction) {
+        case HAILO_H2D_STREAM:
+            {
+                auto status = create_input_vdevice_stream_from_config_params(stream_parameters_pair.second,
+                    stream_parameters_pair.first, multiplexer, scheduler_handle);
+                CHECK_SUCCESS(status);
+            }
+            break;
+        case HAILO_D2H_STREAM:
+            {
+                auto status = create_output_vdevice_stream_from_config_params(stream_parameters_pair.second,
+                    stream_parameters_pair.first, multiplexer, scheduler_handle);
+                CHECK_SUCCESS(status);
+            }
+            break;
+        default:
+            LOGGER__ERROR("stream name {} direction is invalid.", stream_parameters_pair.first);
+            return HAILO_INVALID_ARGUMENT;
+        }
+    }
+
+    // Emit queue-size traces for profiling; ETH input streams are skipped.
+    for (const auto &input_stream : m_input_streams) {
+        if (HAILO_STREAM_INTERFACE_ETH == static_cast<InputStreamBase&>(*input_stream.second).get_interface()) {
+            continue;
+        }
+        auto expected_queue_size = static_cast<InputStreamBase&>(*input_stream.second).get_buffer_frames_size();
+        CHECK_EXPECTED_AS_STATUS(expected_queue_size);
+        TRACE(CreateCoreOpInputStreamsTrace, "", name(), input_stream.first, (uint32_t)expected_queue_size.value());
+    }
+    // Same for outputs; NMS-ordered and ETH output streams are skipped.
+    for (const auto &output_stream : m_output_streams) {
+        if ((hailo_format_order_t::HAILO_FORMAT_ORDER_HAILO_NMS == (static_cast<OutputStreamBase&>(*output_stream.second).get_layer_info().format.order)) ||
+            (HAILO_STREAM_INTERFACE_ETH == static_cast<OutputStreamBase&>(*output_stream.second).get_interface())) {
+            continue;
+        }
+        auto expected_queue_size = static_cast<OutputStreamBase&>(*output_stream.second).get_buffer_frames_size();
+        CHECK_EXPECTED_AS_STATUS(expected_queue_size);
+        TRACE(CreateCoreOpOutputStreamsTrace, "", name(), output_stream.first, (uint32_t)expected_queue_size.value());
+    }
+
+    auto status = m_multiplexer->add_core_op_instance(m_multiplexer_handle, *this);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Creates a single vdevice input stream named `stream_name`.
+// For vDMA interfaces, aggregates the matching low-level stream of every physical device
+// into one vdevice stream and wraps it with a multiplexer wrapper so multiple core-op
+// instances can share it. For other interfaces (e.g. ETH), only a single physical device
+// is supported and the underlying core-op's stream is reused directly.
+// Fix vs. original: removed a duplicated `assert(1 == m_core_ops.size())` in the
+// non-vDMA branch and normalized brace spacing.
+hailo_status VDeviceCoreOp::create_input_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
+    const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle)
+{
+    auto edge_layer = get_layer_info(stream_name);
+    CHECK_EXPECTED_AS_STATUS(edge_layer);
+
+    if (HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface)) {
+        std::vector<std::reference_wrapper<VdmaInputStream>> low_level_streams;
+        low_level_streams.reserve(m_core_ops.size());
+        for (auto &core_op : m_core_ops) {
+            auto stream = core_op->get_input_stream_by_name(stream_name);
+            CHECK(stream, HAILO_INTERNAL_FAILURE);
+            low_level_streams.emplace_back(dynamic_cast<VdmaInputStream&>(stream.release().get()));
+        }
+        auto input_stream = InputVDeviceBaseStream::create(std::move(low_level_streams), edge_layer.value(),
+            scheduler_handle, m_core_op_activated_event, m_core_ops_scheduler);
+        CHECK_EXPECTED_AS_STATUS(input_stream);
+        auto input_stream_wrapper = VDeviceInputStreamMultiplexerWrapper::create(input_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
+        CHECK_EXPECTED_AS_STATUS(input_stream_wrapper);
+        m_input_streams.insert(make_pair(stream_name, input_stream_wrapper.release()));
+    } else {
+        assert(1 == m_core_ops.size());
+        auto stream = m_core_ops[0]->get_input_stream_by_name(stream_name);
+        CHECK(stream, HAILO_INTERNAL_FAILURE);
+        assert(contains(m_core_ops[0]->m_input_streams, stream_name));
+        m_input_streams.insert(make_pair(stream_name, m_core_ops[0]->m_input_streams.at(stream_name)));
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Creates a single vdevice output stream named `stream_name`.
+// For vDMA interfaces, aggregates the matching low-level output stream of every physical
+// device and wraps the result with a multiplexer wrapper. For other interfaces only a
+// single physical device is supported and the underlying core-op's stream is reused.
+hailo_status VDeviceCoreOp::create_output_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
+    const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle)
+{
+    auto edge_layer = get_layer_info(stream_name);
+    CHECK_EXPECTED_AS_STATUS(edge_layer);
+
+    if (HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface)) {
+        std::vector<std::reference_wrapper<VdmaOutputStream>> low_level_streams;
+        low_level_streams.reserve(m_core_ops.size());
+        for (auto &core_op : m_core_ops) {
+            auto stream = core_op->get_output_stream_by_name(stream_name);
+            CHECK(stream, HAILO_INTERNAL_FAILURE);
+            low_level_streams.emplace_back(dynamic_cast<VdmaOutputStream&>(stream.release().get()));
+        }
+        auto output_stream = OutputVDeviceBaseStream::create(std::move(low_level_streams), edge_layer.value(),
+            scheduler_handle, m_core_op_activated_event, m_core_ops_scheduler);
+        CHECK_EXPECTED_AS_STATUS(output_stream);
+        auto output_stream_wrapper = VDeviceOutputStreamMultiplexerWrapper::create(output_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
+        CHECK_EXPECTED_AS_STATUS(output_stream_wrapper);
+        m_output_streams.insert(make_pair(stream_name, output_stream_wrapper.release()));
+    } else {
+        assert(1 == m_core_ops.size());
+        assert(contains(m_core_ops[0]->m_output_streams, stream_name));
+        m_output_streams.insert(make_pair(stream_name, m_core_ops[0]->m_output_streams.at(stream_name)));
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Populates this duplicated core-op's streams by cloning `other`'s multiplexer-wrapped
+// streams under a fresh multiplexer handle, then registers this instance with the
+// shared multiplexer.
+// NOTE(review): the static_casts assume `other`'s streams are the multiplexer wrapper
+// types created by create_vdevice_streams_from_config_params -- confirm the duplicate
+// flow only runs on such core-ops.
+hailo_status VDeviceCoreOp::create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceCoreOp> other)
+{
+    // TODO - HRT-6931 - raise error on this case
+    if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_core_ops.size())) {
+        LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
+    }
+
+    m_multiplexer = other->m_multiplexer;
+    // New handle: one past the number of existing duplicates of `other`.
+    m_multiplexer_handle = other->multiplexer_duplicates_count() + 1;
+
+    for (auto &name_stream_pair : other->m_input_streams) {
+        auto input_stream = static_cast<VDeviceInputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
+        auto copy = input_stream->clone(m_multiplexer_handle);
+        CHECK_EXPECTED_AS_STATUS(copy);
+
+        m_input_streams.insert(make_pair(name_stream_pair.first, copy.release()));
+    }
+
+    for (auto &name_stream_pair : other->m_output_streams) {
+        auto output_stream = static_cast<VDeviceOutputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
+        auto copy = output_stream->clone(m_multiplexer_handle);
+        CHECK_EXPECTED_AS_STATUS(copy);
+
+        m_output_streams.insert(make_pair(name_stream_pair.first, copy.release()));
+    }
+
+    auto status = other->m_multiplexer->add_core_op_instance(m_multiplexer_handle, *this);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Stores the scheduler handle assigned to this core-op by the scheduling flow.
+void VDeviceCoreOp::set_core_op_handle(scheduler_core_op_handle_t handle)
+{
+    m_scheduler_handle = handle;
+}
+
+// Returns the scheduler handle (INVALID_CORE_OP_HANDLE until set_core_op_handle is called).
+scheduler_core_op_handle_t VDeviceCoreOp::core_op_handle() const
+{
+    return m_scheduler_handle;
+}
+
+// This core-op is scheduler-managed iff the weak scheduler pointer is still alive.
+// Fix vs. original: removed the extraneous semicolon after the function definition
+// (flagged by -Wextra-semi).
+bool VDeviceCoreOp::is_scheduled() const
+{
+    return !m_core_ops_scheduler.expired();
+}
+
+// Sets the scheduler timeout for this core-op. Fails with HAILO_INVALID_OPERATION when no
+// scheduler is active; setting a timeout for a specific (non-default) network is not
+// implemented yet.
+hailo_status VDeviceCoreOp::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
+{
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INVALID_OPERATION,
+        "Cannot set scheduler timeout for core-op {}, as it is configured on a vdevice which does not have scheduling enabled", name());
+    if (network_name != HailoRTDefaults::get_network_name(name())) {
+        CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler timeout for a specific network is currently not supported");
+    }
+    auto status = core_ops_scheduler->set_timeout(m_scheduler_handle, timeout, network_name);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
+// Sets the scheduler frame threshold for this core-op. Same preconditions as
+// set_scheduler_timeout: an active scheduler, and only the default network name.
+hailo_status VDeviceCoreOp::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
+{
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INVALID_OPERATION,
+        "Cannot set scheduler threshold for core-op {}, as it is configured on a vdevice which does not have scheduling enabled", name());
+    if (network_name != HailoRTDefaults::get_network_name(name())) {
+        CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler threshold for a specific network is currently not supported");
+    }
+    auto status = core_ops_scheduler->set_threshold(m_scheduler_handle, threshold, network_name);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
+// Sets the scheduler priority for this core-op. Same preconditions as
+// set_scheduler_timeout: an active scheduler, and only the default network name.
+hailo_status VDeviceCoreOp::set_scheduler_priority(uint8_t priority, const std::string &network_name)
+{
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INVALID_OPERATION,
+        "Cannot set scheduler priority for core-op {}, as it is configured on a vdevice which does not have scheduling enabled", name());
+    if (network_name != HailoRTDefaults::get_network_name(name())) {
+        CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler priority for a specific network is currently not supported");
+    }
+    auto status = core_ops_scheduler->set_priority(m_scheduler_handle, priority, network_name);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
+// Delegates to the first physical core-op's latency meters (latency measurement is
+// only supported over a single device -- see the warnings in the stream-creation flows).
+Expected<std::shared_ptr<LatencyMetersMap>> VDeviceCoreOp::get_latency_meters()
+{
+    return m_core_ops[0]->get_latency_meters();
+}
+
+// Resolves the boundary vDMA channel of a stream; only valid when the vdevice wraps a
+// single physical device, since the channel belongs to one device.
+Expected<vdma::BoundaryChannelPtr> VDeviceCoreOp::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
+{
+    CHECK_AS_EXPECTED(1 == m_core_ops.size(), HAILO_INVALID_OPERATION,
+        "get_boundary_vdma_channel_by_stream_name function is not supported on more than 1 physical device.");
+
+    return m_core_ops[0]->get_boundary_vdma_channel_by_stream_name(stream_name);
+}
+
+// Wires the output vstreams into the multiplexer: registers their names and installs
+// can-read/cant-read callbacks that toggle readability per multiplexer handle.
+// No-op when this core-op was created without a multiplexer.
+// NOTE(review): the lambdas capture `this`; assumes the vstreams do not outlive this
+// core-op -- confirm with the vstream lifetime model.
+void VDeviceCoreOp::set_vstreams_multiplexer_callbacks(std::vector<OutputVStream> &output_vstreams)
+{
+    if (nullptr == m_multiplexer) {
+        return;
+    }
+
+    m_multiplexer->set_output_vstreams_names(m_multiplexer_handle, output_vstreams);
+
+    for (auto &vstream : output_vstreams) {
+        static_cast<OutputVStreamImpl&>(*vstream.m_vstream).set_on_vstream_cant_read_callback([this, name = vstream.name()] () {
+            m_multiplexer->set_can_output_vstream_read(m_multiplexer_handle, name, false);
+        });
+        static_cast<OutputVStreamImpl&>(*vstream.m_vstream).set_on_vstream_can_read_callback([this, name = vstream.name()] () {
+            m_multiplexer->set_can_output_vstream_read(m_multiplexer_handle, name, true);
+        });
+    }
+}
+
+// Returns the per-device core-op at `device_index`, downcast (checked) to
+// VdmaConfigCoreOp; HAILO_INTERNAL_FAILURE if the stored core-op is of another type.
+Expected<std::shared_ptr<VdmaConfigCoreOp>> VDeviceCoreOp::get_core_op_by_device_index(uint32_t device_index)
+{
+    CHECK_AS_EXPECTED(device_index < m_core_ops.size(), HAILO_INVALID_ARGUMENT);
+    auto core_op = std::dynamic_pointer_cast<VdmaConfigCoreOp>(m_core_ops[device_index]);
+    CHECK_NOT_NULL_AS_EXPECTED(core_op, HAILO_INTERNAL_FAILURE);
+    return core_op;
+}
+
+// Manually activates the core-op on all underlying devices. Disallowed while the
+// core-op scheduler is active (the scheduler owns activation). The elapsed time is
+// logged and accumulated; as the log notes, activation completes asynchronously.
+Expected<std::unique_ptr<ActivatedNetworkGroup>> VDeviceCoreOp::create_activated_network_group(
+    const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
+    bool resume_pending_stream_transfers)
+{
+    auto start_time = std::chrono::steady_clock::now();
+
+    CHECK_AS_EXPECTED(!m_core_ops_scheduler.lock(), HAILO_INVALID_OPERATION,
+        "Manually activating a core-op is not allowed when the core-op scheduler is active!");
+
+    auto res = VDeviceActivatedCoreOp::create(m_core_ops, m_input_streams, m_output_streams,
+        network_group_params, m_core_op_activated_event, dynamic_batch_size, m_deactivation_time_accumulator,
+        resume_pending_stream_transfers);
+    // Measure elapsed time before checking the result so failures are included too.
+    const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+        std::chrono::steady_clock::now() - start_time).count();
+    CHECK_EXPECTED(res);
+
+    LOGGER__INFO("Activating {} on VDevice took {} milliseconds. Note that the function is asynchronous and"
+        " thus the network is not fully activated yet.", name(), elapsed_time_ms);
+    m_activation_time_accumulator->add_data_point(elapsed_time_ms);
+
+    return res;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_core_op.hpp
+ * @brief Class declaration for VDeviceCoreOp, which is used to support multiple CoreOps objects,
+ * that encapsulate the same actual CoreOp.
+ **/
+
+#ifndef _HAILO_VDEVICE_CORE_OP_HPP_
+#define _HAILO_VDEVICE_CORE_OP_HPP_
+
+#include "hailo/hailort.h"
+#include "common/utils.hpp"
+#include "hailo/network_group.hpp"
+#include "hailo/vstream.hpp"
+
+#include "vdevice/scheduler/network_group_scheduler.hpp"
+#include "vdevice/pipeline_multiplexer.hpp"
+
+#include <cstdint>
+
+
+namespace hailort
+{
+
+// Activation object for a vdevice core-op: aggregates the activated network group of
+// every underlying physical device. Deactivation happens in the destructor.
+class VDeviceActivatedCoreOp : public ActivatedCoreOp
+{
+public:
+    static Expected<std::unique_ptr<ActivatedNetworkGroup>> create(std::vector<std::shared_ptr<CoreOp>> &core_ops,
+        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event,
+        uint16_t dynamic_batch_size, AccumulatorPtr deactivation_time_accumulator,
+        bool resume_pending_stream_transfers);
+
+    // Resets the activation event and destroys the per-device activations, timing the
+    // whole deactivation. Skipped for moved-from instances (m_should_reset_core_op false).
+    virtual ~VDeviceActivatedCoreOp()
+    {
+        if (!m_should_reset_core_op) {
+            return;
+        }
+        const auto start_time = std::chrono::steady_clock::now();
+
+        m_core_op_activated_event->reset();
+        m_activated_network_groups.clear();
+
+        const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+            std::chrono::steady_clock::now() - start_time).count();
+        LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
+        m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
+    }
+
+    // Move-only: copying would run deactivation twice.
+    VDeviceActivatedCoreOp(const VDeviceActivatedCoreOp &other) = delete;
+    VDeviceActivatedCoreOp &operator=(const VDeviceActivatedCoreOp &other) = delete;
+    VDeviceActivatedCoreOp &operator=(VDeviceActivatedCoreOp &&other) = delete;
+    VDeviceActivatedCoreOp(VDeviceActivatedCoreOp &&other) noexcept;
+
+    virtual const std::string &get_network_group_name() const override;
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
+    virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
+
+private:
+    VDeviceActivatedCoreOp(
+        std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
+        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event,
+        AccumulatorPtr deactivation_time_accumulator, hailo_status &status);
+
+    // One activation per physical device.
+    std::vector<std::unique_ptr<ActivatedNetworkGroup>> m_activated_network_groups;
+    // False only for moved-from instances, so deactivation runs exactly once.
+    bool m_should_reset_core_op;
+    AccumulatorPtr m_deactivation_time_accumulator;
+};
+
+// Core-op spanning all physical devices of a vdevice: holds one underlying CoreOp per
+// device and exposes them as a single CoreOp, optionally managed by the core-ops
+// scheduler and shared between callers through the pipeline multiplexer.
+class VDeviceCoreOp : public CoreOp
+{
+public:
+    static Expected<std::shared_ptr<VDeviceCoreOp>> create(std::vector<std::shared_ptr<CoreOp>> core_ops,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash);
+
+    // Creates a sibling core-op sharing `other`'s per-device core-ops (multiplexer flow).
+    static Expected<std::shared_ptr<VDeviceCoreOp>> duplicate(std::shared_ptr<VDeviceCoreOp> other);
+
+    virtual ~VDeviceCoreOp() = default;
+    VDeviceCoreOp(const VDeviceCoreOp &other) = delete;
+    VDeviceCoreOp &operator=(const VDeviceCoreOp &other) = delete;
+    VDeviceCoreOp &operator=(VDeviceCoreOp &&other) = delete;
+    VDeviceCoreOp(VDeviceCoreOp &&other) = default;
+
+    // Stream creation: either fresh from config params, or cloned from a duplicate.
+    hailo_status create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer,
+        scheduler_core_op_handle_t scheduler_handle);
+    hailo_status create_input_vdevice_stream_from_config_params(
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
+        std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle);
+    hailo_status create_output_vdevice_stream_from_config_params(
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
+        std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle);
+
+    hailo_status create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceCoreOp> other);
+
+    // True when this core-op matches the given HEF + configure params (same name, hash,
+    // batch size and power mode) -- precondition for sharing via the multiplexer.
+    bool equals(const Hef &hef, const std::pair<const std::string, ConfigureNetworkParams> &params_pair)
+    {
+        if ((params_pair.first == name()) && (hef.hash() == m_hef_hash)) {
+            if ((params_pair.second.batch_size == m_config_params.batch_size) &&
+                (params_pair.second.power_mode == m_config_params.power_mode)) {
+                return true;
+            }
+            LOGGER__INFO("The network group: {} was already configured to the device with different params."
+                " To use the Stream Multiplexer configure the network with the same params.", name());
+        }
+
+        return false;
+    }
+
+    // Number of duplicates = multiplexer instances minus the original.
+    uint32_t multiplexer_duplicates_count()
+    {
+        assert(m_multiplexer->instances_count() > 0);
+        return static_cast<uint32_t>(m_multiplexer->instances_count() - 1);
+    }
+
+    virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
+
+    virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
+    virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(
+        const std::string &stream_name) override;
+
+    void set_core_op_handle(scheduler_core_op_handle_t handle);
+    scheduler_core_op_handle_t core_op_handle() const;
+    virtual bool is_scheduled() const override;
+    virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
+    virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
+    virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
+
+    void set_vstreams_multiplexer_callbacks(std::vector<OutputVStream> &output_vstreams) override;
+
+    // Only meaningful without the scheduler; with it, activation is scheduler-owned.
+    virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override
+    {
+        CHECK(!m_core_ops_scheduler.lock(), HAILO_INVALID_OPERATION,
+            "Waiting for core-op activation is not allowed when the core-ops scheduler is active!");
+
+        return m_core_op_activated_event->wait(timeout);
+    }
+
+    // Direct (de)activation is performed on the per-device core-ops, never on this
+    // aggregate -- reaching these overrides indicates an internal error.
+    virtual hailo_status activate_impl(uint16_t /*dynamic_batch_size*/, bool /* resume_pending_stream_transfers */) override
+    {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    virtual hailo_status deactivate_impl(bool /* keep_nn_config_during_reset */) override
+    {
+        return HAILO_INTERNAL_FAILURE;
+    }
+
+    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
+        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
+        bool resume_pending_stream_transfers) override;
+
+    Expected<std::shared_ptr<VdmaConfigCoreOp>> get_core_op_by_device_index(uint32_t device_index);
+
+private:
+    VDeviceCoreOp(std::vector<std::shared_ptr<CoreOp>> core_ops, CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        const std::string &hef_hash, hailo_status &status);
+
+    // One core-op per physical device.
+    std::vector<std::shared_ptr<CoreOp>> m_core_ops;
+    // Weak: expired when the vdevice was created without scheduling.
+    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
+    scheduler_core_op_handle_t m_scheduler_handle;
+    multiplexer_core_op_handle_t m_multiplexer_handle;
+    // Null until create_vdevice_streams_from_config_params/_from_duplicate runs.
+    std::shared_ptr<PipelineMultiplexer> m_multiplexer;
+    std::string m_hef_hash;
+};
+
+}
+
+#endif /* _HAILO_VDEVICE_CORE_OP_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_internal.hpp
+ * @brief Class declaration for VDeviceBase that implements the basic VDevice "interface".
+ *        Hence, the hierarchy is as follows:
+ *
+ * VDevice (External "interface")
+ * |
+ * |-- VDeviceHandle (VDevice handle for a possibly shared VDeviceBase
+ * | when hailort is running as single process)
+ * |-- VDeviceClient (VDevice client for a possibly shared VDeviceBase
+ * | when hailort is running as a service)
+ * |-- VDeviceBase (Actual implementations)
+ * |
+ * |-- std::vector<VdmaDevice>
+ **/
+
+#ifndef _HAILO_VDEVICE_INTERNAL_HPP_
+#define _HAILO_VDEVICE_INTERNAL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/vdevice.hpp"
+
+#include "vdma/vdma_device.hpp"
+#include "vdma/vdma_config_manager.hpp"
+#include "vdevice/vdevice_core_op.hpp"
+#include "vdevice/scheduler/network_group_scheduler.hpp"
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+#include "service/hailort_rpc_client.hpp"
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+
+namespace hailort
+{
+
+
+// Actual VDevice implementation: owns the physical devices, the optional core-ops
+// scheduler, and the core-ops/network-groups configured on them.
+class VDeviceBase : public VDevice
+{
+public:
+    static Expected<std::unique_ptr<VDeviceBase>> create(const hailo_vdevice_params_t &params);
+    VDeviceBase(VDeviceBase &&) = delete;
+    VDeviceBase(const VDeviceBase &) = delete;
+    VDeviceBase &operator=(VDeviceBase &&) = delete;
+    VDeviceBase &operator=(const VDeviceBase &) = delete;
+    virtual ~VDeviceBase() = default;
+
+    virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
+        const NetworkGroupsParamsMap &configure_params={}) override;
+
+    virtual Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override
+    {
+        // Return Expected for future functionality
+        std::vector<std::reference_wrapper<Device>> devices_refs;
+        for (auto &device : m_devices) {
+            devices_refs.push_back(*device);
+        }
+        return devices_refs;
+    }
+
+    virtual Expected<std::vector<std::string>> get_physical_devices_ids() const override
+    {
+        std::vector<std::string> device_ids;
+        device_ids.reserve(m_devices.size());
+        for (auto &device : m_devices) {
+            device_ids.push_back(device.get()->get_dev_id());
+        }
+        return device_ids;
+    }
+
+    // Null when the vdevice was created without the scheduler.
+    const CoreOpsSchedulerPtr &core_ops_scheduler()
+    {
+        return m_core_ops_scheduler;
+    }
+
+    // Currently only a homogeneous vDevice is allowed (= all devices are of the same type)
+    virtual Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
+
+    static hailo_status validate_params(const hailo_vdevice_params_t &params);
+
+private:
+    VDeviceBase(std::vector<std::unique_ptr<Device>> &&devices, CoreOpsSchedulerPtr core_ops_scheduler) :
+        m_devices(std::move(devices)), m_core_ops_scheduler(core_ops_scheduler)
+    {}
+
+    static Expected<std::vector<std::unique_ptr<Device>>> create_devices(const hailo_vdevice_params_t &params);
+    static Expected<std::vector<std::string>> get_device_ids(const hailo_vdevice_params_t &params);
+    Expected<NetworkGroupsParamsMap> create_local_config_params(Hef &hef, const NetworkGroupsParamsMap &configure_params);
+    Expected<std::shared_ptr<VDeviceCoreOp>> create_vdevice_network_group(Hef &hef, const std::pair<const std::string, ConfigureNetworkParams> &params);
+
+    std::vector<std::unique_ptr<Device>> m_devices;
+    CoreOpsSchedulerPtr m_core_ops_scheduler;
+    std::vector<std::shared_ptr<VDeviceCoreOp>> m_vdevice_core_ops;
+    std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups; // TODO: HRT-9547 - Remove when ConfiguredNetworkGroup will be kept in global context
+
+    std::mutex m_mutex;
+};
+
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+// VDevice front-end used when HailoRT runs as a service: forwards calls over the RPC
+// client to a (possibly shared) VDeviceBase identified by m_handle.
+class VDeviceClient : public VDevice
+{
+public:
+    static Expected<std::unique_ptr<VDevice>> create(const hailo_vdevice_params_t &params);
+
+    VDeviceClient(VDeviceClient &&) = delete;
+    VDeviceClient(const VDeviceClient &) = delete;
+    VDeviceClient &operator=(VDeviceClient &&) = delete;
+    VDeviceClient &operator=(const VDeviceClient &) = delete;
+    virtual ~VDeviceClient();
+
+    Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
+        const NetworkGroupsParamsMap &configure_params={}) override;
+
+    Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override;
+
+    Expected<std::vector<std::string>> get_physical_devices_ids() const override;
+    Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
+
+    // fork() support hooks. NOTE(review): presumably these re-establish the RPC channel
+    // around fork (see create_client()) -- confirm against the implementation.
+    virtual hailo_status before_fork() override;
+    virtual hailo_status after_fork_in_parent() override;
+    virtual hailo_status after_fork_in_child() override;
+
+private:
+    VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle);
+
+    hailo_status create_client();
+
+    std::unique_ptr<HailoRtRpcClient> m_client;
+    // Service-side identifier of the underlying VDeviceBase.
+    uint32_t m_handle;
+    std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups;
+};
+
+#endif // HAILO_SUPPORT_MULTI_PROCESS
+
+// VDevice front-end for the single-process case: refers to a possibly shared
+// VDeviceBase through an integer handle resolved on every call.
+class VDeviceHandle : public VDevice
+{
+public:
+    static Expected<std::unique_ptr<VDevice>> create(const hailo_vdevice_params_t &params);
+
+    VDeviceHandle(VDeviceHandle &&) = delete;
+    VDeviceHandle(const VDeviceHandle &) = delete;
+    VDeviceHandle &operator=(VDeviceHandle &&) = delete;
+    VDeviceHandle &operator=(const VDeviceHandle &) = delete;
+    virtual ~VDeviceHandle();
+
+    Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
+        const NetworkGroupsParamsMap &configure_params={}) override;
+
+    Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override;
+    Expected<std::vector<std::string>> get_physical_devices_ids() const override;
+    Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
+
+private:
+    VDeviceHandle(uint32_t handle);
+    uint32_t m_handle;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VDEVICE_INTERNAL_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_native_stream.hpp
+ * @brief Internal stream implementation for native streams
+ *
+ **/
+
+#ifndef HAILO_VDEVICE_NATIVE_STREAM_HPP_
+#define HAILO_VDEVICE_NATIVE_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdevice_stream.hpp"
+
+
+namespace hailort
+{
+
+// Native (non-scheduled) vdevice input stream: used when no core-ops scheduler is
+// active; forwards writes to the underlying per-device vDMA input streams.
+// Fix vs. original: removed the extraneous semicolon after the inline is_scheduled()
+// body (flagged by -Wextra-semi).
+class InputVDeviceNativeStream : public InputVDeviceBaseStream {
+public:
+    InputVDeviceNativeStream(
+        std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
+        EventPtr &&core_op_activated_event,
+        const LayerInfo &layer_info,
+        hailo_status &status) :
+        InputVDeviceBaseStream(std::move(streams), std::move(core_op_activated_event), layer_info, status)
+    {}
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    // Native streams are never scheduler-managed.
+    virtual bool is_scheduled() override { return false; }
+
+protected:
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
+        const std::function<bool()> &should_cancel = []() { return false; }) override;
+};
+
+// Native (non-scheduled) vdevice output stream: used when no core-ops scheduler is
+// active; forwards reads to the underlying per-device vDMA output streams.
+// Fix vs. original: removed the double semicolon after the read() declaration and the
+// extraneous semicolon after the inline is_scheduled() body (flagged by -Wextra-semi).
+class OutputVDeviceNativeStream : public OutputVDeviceBaseStream {
+public:
+    OutputVDeviceNativeStream(
+        std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
+        const LayerInfo &layer_info,
+        EventPtr &&core_op_activated_event,
+        hailo_status &status) :
+        OutputVDeviceBaseStream(std::move(streams), layer_info, std::move(core_op_activated_event), status)
+    {}
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    // Native streams are never scheduler-managed.
+    virtual bool is_scheduled() override { return false; }
+
+protected:
+    virtual hailo_status read(MemoryView buffer) override;
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_VDEVICE_NATIVE_STREAM_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_stream.cpp
+ * @brief Input/output vdevice stream implementation - aggregates the low-level streams
+ *        of all underlying physical devices into a single stream object.
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/stream.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/hailort_common.hpp"
+
+#include "common/utils.hpp"
+
+#include "utils/profiler/tracer_macros.hpp"
+#include "vdevice/vdevice_stream.hpp"
+#include "vdevice/vdevice_native_stream.hpp"
+#include "vdevice/scheduler/multi_device_scheduled_stream.hpp"
+#include "vdevice/scheduler/scheduled_stream.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+#include <new>
+
+
+namespace hailort
+{
+
+// Deactivates every underlying per-device stream, best-effort: failures are logged
+// and the last failing status is returned, but all streams are attempted.
+hailo_status InputVDeviceBaseStream::deactivate_stream()
+{
+    auto status = HAILO_SUCCESS; // Best effort
+    for (auto &stream : m_streams) {
+        auto deactivate_status = stream.get().deactivate_stream();
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed to deactivate input stream. (status: {} device: {})", deactivate_status, stream.get().get_dev_id());
+            status = deactivate_status;
+        }
+    }
+    m_is_stream_activated = false;
+    return status;
+}
+
+/** Input stream **/
+// Destructor deactivates if still active; the result is intentionally ignored
+// (best-effort cleanup, destructors must not fail).
+InputVDeviceBaseStream::~InputVDeviceBaseStream()
+{
+    // We want to stop the vdma channel before closing the stream in the firmware
+    // because sending data to a closed stream may terminate the dma engine
+    if (m_is_stream_activated) {
+        (void)deactivate_stream();
+    }
+}
+
+// Activates every underlying per-device stream. On the first failure, rolls back by
+// deactivating (best-effort) and returns that failure.
+hailo_status InputVDeviceBaseStream::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    for (auto &stream : m_streams) {
+        auto status = stream.get().activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to activate input stream. (device: {})", stream.get().get_dev_id());
+            deactivate_stream();
+            return status;
+        }
+    }
+    m_is_stream_activated = true;
+    return HAILO_SUCCESS;
+}
+
+// Writes `size` bytes starting at `buffer + offset` via sync_write_raw_buffer,
+// discarding the written-size payload and returning only the status.
+hailo_status InputVDeviceBaseStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
+{
+    ASSERT(NULL != buffer);
+
+    return sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, size)).status();
+}
+
+// Flushes the pending buffer of the single underlying vDMA stream. Only supported
+// when exactly one stream exists and device_index is 0.
+// NOTE(review): indexes with m_next_transfer_stream_index although the assert pins the
+// size to 1 (so it presumably must be 0 here) -- confirm the index invariant.
+hailo_status InputVDeviceBaseStream::send_pending_buffer(size_t device_index)
+{
+    assert(1 == m_streams.size());
+    CHECK(0 == device_index, HAILO_INVALID_OPERATION);
+    VdmaInputStream &vdma_input = static_cast<VdmaInputStream&>(m_streams[m_next_transfer_stream_index].get());
+    return vdma_input.send_pending_buffer();
+}
+
+// Sums the buffer-frames capacity over all underlying per-device streams.
+Expected<size_t> InputVDeviceBaseStream::get_buffer_frames_size() const
+{
+    size_t total_buffers_size = 0;
+    for (auto &stream : m_streams) {
+        auto stream_buffer_size = stream.get().get_buffer_frames_size();
+        CHECK_EXPECTED(stream_buffer_size);
+        total_buffers_size += stream_buffer_size.value();
+    }
+
+    return total_buffers_size;
+}
+
+// Sums the in-flight (pending) frame count over all underlying per-device streams.
+Expected<size_t> InputVDeviceBaseStream::get_pending_frames_count() const
+{
+    size_t total = 0;
+    for (const auto &stream : m_streams) {
+        auto pending = stream.get().get_pending_frames_count();
+        CHECK_EXPECTED(pending);
+        total += pending.value();
+    }
+
+    return total;
+}
+
+// Factory for input vdevice streams. Chooses the concrete implementation by
+// scheduler presence and device count:
+//  - scheduler + multiple devices -> MultiDeviceScheduledInputStream (queued writes)
+//  - scheduler + single device    -> ScheduledInputStream
+//  - no scheduler                 -> InputVDeviceNativeStream (round-robin writes)
+Expected<std::unique_ptr<InputVDeviceBaseStream>> InputVDeviceBaseStream::create(std::vector<std::reference_wrapper<VdmaInputStream>> &&low_level_streams,
+    const LayerInfo &edge_layer, const scheduler_core_op_handle_t &core_op_handle,
+    EventPtr core_op_activated_event, CoreOpsSchedulerWeakPtr core_ops_scheduler)
+{
+    assert(0 < low_level_streams.size());
+    auto status = HAILO_UNINITIALIZED;
+
+    std::unique_ptr<InputVDeviceBaseStream> local_vdevice_stream;
+
+    if (core_ops_scheduler.lock()) {
+        if (1 < low_level_streams.size()) {
+            // Queue capacity: per-device buffered frame count times the number of devices.
+            // Assumes all low-level streams share the same frame/buffer sizes (index 0 is used).
+            auto buffer_frame_size = low_level_streams[0].get().get_buffer_frames_size();
+            CHECK_EXPECTED(buffer_frame_size);
+            auto frame_size = low_level_streams[0].get().get_frame_size();
+            auto buffers_queue_ptr = BuffersQueue::create_unique(frame_size, (low_level_streams.size() * buffer_frame_size.value()));
+            CHECK_EXPECTED(buffers_queue_ptr);
+
+            local_vdevice_stream = make_unique_nothrow<MultiDeviceScheduledInputStream>(std::move(low_level_streams),
+                core_op_handle, std::move(core_op_activated_event), edge_layer,
+                core_ops_scheduler, buffers_queue_ptr.release(), status);
+        } else {
+            local_vdevice_stream = make_unique_nothrow<ScheduledInputStream>(std::move(low_level_streams),
+                core_op_handle, std::move(core_op_activated_event), edge_layer,
+                core_ops_scheduler, status);
+        }
+    } else {
+        local_vdevice_stream = make_unique_nothrow<InputVDeviceNativeStream>(std::move(low_level_streams),
+            std::move(core_op_activated_event), edge_layer,status);
+    }
+
+    // The constructors report their failures through the out-param 'status'.
+    CHECK_AS_EXPECTED((nullptr != local_vdevice_stream), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return local_vdevice_stream;
+}
+
+hailo_status InputVDeviceBaseStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    // Propagate the timeout to every underlying stream; fail fast on the first error.
+    for (auto &low_level_stream : m_streams) {
+        const auto status = low_level_stream.get().set_timeout(timeout);
+        CHECK_SUCCESS(status, "Failed to set timeout to input stream. (device: {})", low_level_stream.get().get_dev_id());
+    }
+    return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds InputVDeviceBaseStream::get_timeout() const
+{
+    // All timeout values of m_streams should be the same
+    return m_streams.front().get().get_timeout();
+}
+
+hailo_stream_interface_t InputVDeviceBaseStream::get_interface() const
+{
+    // All interface values of m_streams should be the same
+    return m_streams.front().get().get_interface();
+}
+
+hailo_status InputVDeviceBaseStream::flush()
+{
+    // Best effort: flush every underlying stream; return the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    for (auto &stream : m_streams) {
+        auto flush_status = stream.get().flush();
+        // Bugfix: the result of the *current* flush must be tested (and logged),
+        // not the stale aggregated status from a previous iteration - the old
+        // check silently dropped every per-stream flush failure.
+        if (HAILO_SUCCESS != flush_status) {
+            LOGGER__ERROR("Failed to flush input stream. (status: {} device: {})", flush_status, stream.get().get_dev_id());
+            status = flush_status;
+        }
+    }
+    return status;
+}
+
+Expected<size_t> ScheduledInputStream::sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel)
+{
+    // Scheduled flow always goes through the scheduler-aware implementation.
+    return sync_write_raw_buffer_impl(buffer, m_core_op_handle, should_cancel);
+}
+
+// Native (non-scheduled) write: writes to the current underlying stream and
+// advances the round-robin cursor once a full dynamic batch has been written.
+Expected<size_t> InputVDeviceNativeStream::sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel)
+{
+    if (should_cancel()) {
+        return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
+    }
+
+    auto expected_written_bytes = m_streams[m_next_transfer_stream_index].get().sync_write_raw_buffer(buffer);
+    if (HAILO_SUCCESS != expected_written_bytes.status()) {
+        LOGGER__INFO("Write to stream has failed! status = {}", expected_written_bytes.status());
+        return make_unexpected(expected_written_bytes.status());
+    }
+    auto written_bytes = expected_written_bytes.value();
+
+    // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
+    // (the batch size of stream 0 is assumed to match all streams).
+    if (0 == (++m_acc_frames % m_streams[0].get().get_dynamic_batch_size())) {
+        m_next_transfer_stream_index = static_cast<uint32_t>((m_next_transfer_stream_index + 1) % m_streams.size());
+        m_acc_frames = 0;
+    }
+    return written_bytes;
+}
+
+// Scheduler-aware write: waits for the scheduler's permission, enqueues the
+// buffer on the single underlying stream, then reports completion back to the
+// scheduler. Returns the number of bytes written on success.
+Expected<size_t> ScheduledInputStream::sync_write_raw_buffer_impl(const MemoryView &buffer, scheduler_core_op_handle_t core_op_handle,
+    const std::function<bool()> &should_cancel)
+{
+    // The scheduler may already be gone during teardown.
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK_AS_EXPECTED(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    auto status = core_ops_scheduler->wait_for_write(core_op_handle, name(), get_timeout(), should_cancel);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        // User abort is an expected flow - log quietly and propagate.
+        LOGGER__INFO("Write to stream was aborted.");
+        return make_unexpected(status);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    TRACE(WriteFrameTrace, "", core_op_handle, m_stream_info.name);
+
+    assert(1 == m_streams.size());
+    status = m_streams[0].get().write_buffer_only(buffer, should_cancel);
+
+    // Always notify the scheduler - even on failure - so its bookkeeping stays
+    // consistent before we propagate the write error.
+    auto write_finish_status = core_ops_scheduler->signal_write_finish(core_op_handle, name(), status != HAILO_SUCCESS);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__INFO("Write to stream has failed! status = {}", status);
+        return make_unexpected(status);
+    }
+
+    if (HAILO_STREAM_ABORTED_BY_USER == write_finish_status) {
+        return make_unexpected(write_finish_status);
+    }
+    CHECK_SUCCESS_AS_EXPECTED(write_finish_status);
+
+    auto written_bytes = buffer.size();
+    return written_bytes;
+}
+
+hailo_status ScheduledInputStream::abort()
+{
+    // Delegate to the scheduler-aware abort for this core-op.
+    return abort_impl(m_core_op_handle);
+}
+
+hailo_status InputVDeviceNativeStream::abort()
+{
+    // Best effort: abort every underlying stream; return the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    for (auto &stream : m_streams) {
+        auto abort_status = stream.get().abort();
+        // Bugfix: inspect the result of the abort we just issued, not the stale
+        // aggregated status - the old check silently dropped abort failures.
+        if (HAILO_SUCCESS != abort_status) {
+            LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", abort_status, stream.get().get_dev_id());
+            status = abort_status;
+        }
+    }
+
+    return status;
+}
+
+hailo_status ScheduledInputStream::abort_impl(scheduler_core_op_handle_t core_op_handle)
+{
+    // Best effort: abort the single underlying stream, then disable this stream
+    // in the core-op scheduler; returns the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    assert(1 == m_streams.size());
+    auto abort_status = m_streams[0].get().abort();
+    // Bugfix: test the abort result itself; the old check tested the aggregate
+    // status (always HAILO_SUCCESS at this point), so failures were never recorded.
+    if (HAILO_SUCCESS != abort_status) {
+        LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", abort_status, m_streams[0].get().get_dev_id());
+        status = abort_status;
+    }
+
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    auto disable_status = core_ops_scheduler->disable_stream(core_op_handle, name());
+    if (HAILO_SUCCESS != disable_status) {
+        LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
+        status = disable_status;
+    }
+
+    return status;
+}
+
+hailo_status ScheduledInputStream::clear_abort()
+{
+    // Delegate to the scheduler-aware clear-abort for this core-op.
+    return clear_abort_impl(m_core_op_handle);
+}
+
+hailo_status InputVDeviceNativeStream::clear_abort()
+{
+    // Best effort: clear the abort flag on every underlying stream.
+    // HAILO_STREAM_NOT_ACTIVATED is expected for an inactive stream - not an error.
+    auto status = HAILO_SUCCESS;
+    for (auto &low_level_stream : m_streams) {
+        const auto clear_status = low_level_stream.get().clear_abort();
+        if ((HAILO_SUCCESS == clear_status) || (HAILO_STREAM_NOT_ACTIVATED == clear_status)) {
+            continue;
+        }
+        LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_status, low_level_stream.get().get_dev_id());
+        status = clear_status;
+    }
+
+    return status;
+}
+
+// Clears the abort flag on the single underlying stream and re-enables the
+// stream in the core-op scheduler; best effort - returns the last failure.
+hailo_status ScheduledInputStream::clear_abort_impl(scheduler_core_op_handle_t core_op_handle)
+{
+    auto status = HAILO_SUCCESS; // Best effort
+    assert(1 == m_streams.size());
+    auto clear_abort_status = m_streams[0].get().clear_abort();
+    // HAILO_STREAM_NOT_ACTIVATED is expected for an inactive stream - not an error.
+    if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
+        LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, m_streams[0].get().get_dev_id());
+        status = clear_abort_status;
+    }
+
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    auto enable_status = core_ops_scheduler->enable_stream(core_op_handle, name());
+    if (HAILO_SUCCESS != enable_status) {
+        LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
+        status = enable_status;
+    }
+
+    return status;
+}
+
+/** Output stream **/
+hailo_status OutputVDeviceBaseStream::deactivate_stream()
+{
+    // Best effort: deactivate every underlying stream; return the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    for (auto &stream : m_streams) {
+        auto deactivate_status = stream.get().deactivate_stream();
+        // Bugfix: check the result of the *current* deactivation; the old code
+        // tested the stale aggregated status, so per-stream failures were never
+        // logged or recorded.
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed to deactivate output stream. (status: {} device: {})", deactivate_status, stream.get().get_dev_id());
+            status = deactivate_status;
+        }
+    }
+    m_is_stream_activated = false;
+    return status;
+}
+
+OutputVDeviceBaseStream::~OutputVDeviceBaseStream()
+{
+    // Stop the vdma channel before the firmware closes the stream:
+    // pushing data into an already-closed stream may terminate the dma engine.
+    if (!m_is_stream_activated) {
+        return;
+    }
+    (void)deactivate_stream();
+}
+
+hailo_status OutputVDeviceBaseStream::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    // Activate every underlying stream; on the first failure roll back the ones
+    // already activated (best effort) and propagate the error.
+    for (auto &low_level_stream : m_streams) {
+        const auto activate_status = low_level_stream.get().activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        if (HAILO_SUCCESS == activate_status) {
+            continue;
+        }
+        LOGGER__ERROR("Failed to activate output stream. (device: {})", low_level_stream.get().get_dev_id());
+        deactivate_stream();
+        return activate_status;
+    }
+    m_is_stream_activated = true;
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVDeviceBaseStream::read_all(MemoryView &/*buffer*/)
+{
+    // Not supported here - vdevice output reads go through read()/read_impl().
+    LOGGER__ERROR("read_all should not be called in vdevice flow");
+    return HAILO_INTERNAL_FAILURE;
+}
+
+Expected<size_t> OutputVDeviceBaseStream::sync_read_raw_buffer(MemoryView &/*buffer*/)
+{
+    // Not supported here - vdevice output reads go through read()/read_impl().
+    LOGGER__ERROR("sync_read_raw_buffer should not be called in vdevice flow");
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+hailo_status ScheduledOutputStream::read(MemoryView buffer)
+{
+    // Scheduled flow always goes through the scheduler-aware implementation.
+    return read_impl(buffer, m_core_op_handle);
+}
+
+// Native (non-scheduled) read: reads from the current underlying stream and
+// advances the round-robin cursor once a full dynamic batch has been consumed.
+hailo_status OutputVDeviceNativeStream::read(MemoryView buffer)
+{
+    auto status = m_streams[m_next_transfer_stream_index].get().read(buffer);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__INFO("Read from stream has failed! status = {}", status);
+        return status;
+    }
+
+    // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
+    // (the batch size of stream 0 is assumed to match all streams).
+    if (0 == (++m_acc_frames % m_streams[0].get().get_dynamic_batch_size())) {
+        m_next_transfer_stream_index = static_cast<uint32_t>((m_next_transfer_stream_index + 1) % m_streams.size());
+        m_acc_frames = 0;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Scheduler-aware read: waits for the scheduler to pick the device that has a
+// ready frame, reads from that device, then signals read completion.
+hailo_status ScheduledOutputStream::read_impl(MemoryView buffer, scheduler_core_op_handle_t core_op_handle)
+{
+    // The scheduler may already be gone during teardown.
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    // wait_for_read yields the index of the device to read from.
+    auto device_id = core_ops_scheduler->wait_for_read(core_op_handle, name(), get_timeout());
+    if (HAILO_STREAM_ABORTED_BY_USER == device_id.status()) {
+        // User abort is an expected flow - log quietly and propagate.
+        LOGGER__INFO("Read from stream was aborted.");
+        return device_id.status();
+    }
+    CHECK_EXPECTED_AS_STATUS(device_id);
+
+    TRACE(ReadFrameTrace, "", core_op_handle, m_stream_info.name);
+    auto status = m_streams[device_id.value()].get().read(buffer);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__INFO("Read from stream has failed! status = {}", status);
+        return status;
+    }
+
+    status = core_ops_scheduler->signal_read_finish(core_op_handle, name(), device_id.value());
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Factory for output vdevice streams: scheduler present -> ScheduledOutputStream,
+// otherwise -> OutputVDeviceNativeStream (round-robin reads).
+Expected<std::unique_ptr<OutputVDeviceBaseStream>> OutputVDeviceBaseStream::create(std::vector<std::reference_wrapper<VdmaOutputStream>> &&low_level_streams,
+    const LayerInfo &edge_layer, const scheduler_core_op_handle_t &core_op_handle, EventPtr core_op_activated_event,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler)
+{
+    assert(0 < low_level_streams.size());
+    auto status = HAILO_UNINITIALIZED;
+
+    std::unique_ptr<OutputVDeviceBaseStream> local_vdevice_stream;
+    if (core_ops_scheduler.lock()) {
+        local_vdevice_stream = make_unique_nothrow<ScheduledOutputStream>(std::move(low_level_streams), core_op_handle,
+            edge_layer, std::move(core_op_activated_event), core_ops_scheduler, status);
+    } else {
+        local_vdevice_stream = make_unique_nothrow<OutputVDeviceNativeStream>(std::move(low_level_streams), edge_layer,
+            std::move(core_op_activated_event), status);
+    }
+
+    // The constructors report their failures through the out-param 'status'.
+    CHECK_AS_EXPECTED((nullptr != local_vdevice_stream), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return local_vdevice_stream;
+}
+
+hailo_status OutputVDeviceBaseStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    // Propagate the timeout to every underlying stream; fail fast on the first error.
+    for (auto &low_level_stream : m_streams) {
+        const auto status = low_level_stream.get().set_timeout(timeout);
+        CHECK_SUCCESS(status, "Failed to set timeout to output stream. (device: {})", low_level_stream.get().get_dev_id());
+    }
+    return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds OutputVDeviceBaseStream::get_timeout() const
+{
+    // All timeout values of m_streams should be the same
+    return m_streams.front().get().get_timeout();
+}
+
+hailo_stream_interface_t OutputVDeviceBaseStream::get_interface() const
+{
+    // All interface values of m_streams should be the same
+    return m_streams.front().get().get_interface();
+}
+
+hailo_status ScheduledOutputStream::abort()
+{
+    // Delegate to the scheduler-aware abort for this core-op.
+    return abort_impl(m_core_op_handle);
+}
+
+hailo_status OutputVDeviceNativeStream::abort()
+{
+    // Best effort: abort every underlying stream; return the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    for (auto &stream : m_streams) {
+        auto abort_status = stream.get().abort();
+        // Bugfix: inspect the result of the abort we just issued, not the stale
+        // aggregated status - the old check silently dropped abort failures.
+        if (HAILO_SUCCESS != abort_status) {
+            LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", abort_status, stream.get().get_dev_id());
+            status = abort_status;
+        }
+    }
+
+    return status;
+}
+
+hailo_status ScheduledOutputStream::abort_impl(scheduler_core_op_handle_t core_op_handle)
+{
+    // Best effort: abort all underlying streams, then disable this stream in
+    // the core-op scheduler; returns the last failure (if any).
+    auto status = HAILO_SUCCESS;
+    for (auto& stream : m_streams) {
+        auto abort_status = stream.get().abort();
+        // Bugfix: test the abort result itself; the old check tested the stale
+        // aggregated status, so abort failures were never logged or recorded.
+        if (HAILO_SUCCESS != abort_status) {
+            LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", abort_status, stream.get().get_dev_id());
+            status = abort_status;
+        }
+    }
+
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    auto disable_status = core_ops_scheduler->disable_stream(core_op_handle, name());
+    if (HAILO_SUCCESS != disable_status) {
+        LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
+        status = disable_status;
+    }
+
+    return status;
+}
+
+hailo_status ScheduledOutputStream::clear_abort()
+{
+    // Delegate to the scheduler-aware clear-abort for this core-op.
+    return clear_abort_impl(m_core_op_handle);
+}
+
+hailo_status OutputVDeviceNativeStream::clear_abort()
+{
+    // Best effort: clear the abort flag on every underlying stream.
+    // HAILO_STREAM_NOT_ACTIVATED is expected for an inactive stream - not an error.
+    auto status = HAILO_SUCCESS;
+    for (auto &low_level_stream : m_streams) {
+        const auto clear_status = low_level_stream.get().clear_abort();
+        if ((HAILO_SUCCESS == clear_status) || (HAILO_STREAM_NOT_ACTIVATED == clear_status)) {
+            continue;
+        }
+        LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_status, low_level_stream.get().get_dev_id());
+        status = clear_status;
+    }
+
+    return status;
+}
+
+// Clears the abort flag on every underlying stream and re-enables the stream
+// in the core-op scheduler; best effort - returns the last failure.
+hailo_status ScheduledOutputStream::clear_abort_impl(scheduler_core_op_handle_t core_op_handle)
+{
+    auto status = HAILO_SUCCESS; // Best effort
+    for (auto& stream : m_streams) {
+        auto clear_abort_status = stream.get().clear_abort();
+        // HAILO_STREAM_NOT_ACTIVATED is expected for an inactive stream - not an error.
+        if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
+            LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
+            status = clear_abort_status;
+        }
+    }
+
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    auto enable_status = core_ops_scheduler->enable_stream(core_op_handle, name());
+    if (HAILO_SUCCESS != enable_status) {
+        LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
+        status = enable_status;
+    }
+
+    return status;
+}
+
+Expected<size_t> OutputVDeviceBaseStream::get_buffer_frames_size() const
+{
+    // Total frame-buffer capacity, summed across all underlying streams.
+    // HAILO_NOT_AVAILABLE from any stream makes the whole query unavailable.
+    size_t accumulated_size = 0;
+    for (const auto &low_level_stream : m_streams) {
+        auto frames_size = low_level_stream.get().get_buffer_frames_size();
+        if (HAILO_NOT_AVAILABLE == frames_size.status()) {
+            return make_unexpected(HAILO_NOT_AVAILABLE);
+        }
+        CHECK_EXPECTED(frames_size);
+        accumulated_size += frames_size.value();
+    }
+
+    return accumulated_size;
+}
+
+Expected<size_t> OutputVDeviceBaseStream::get_pending_frames_count() const
+{
+    // Total in-flight (pending) frames, summed across all underlying streams.
+    // HAILO_NOT_AVAILABLE from any stream makes the whole query unavailable.
+    size_t accumulated_count = 0;
+    for (const auto &low_level_stream : m_streams) {
+        auto pending_count = low_level_stream.get().get_pending_frames_count();
+        if (HAILO_NOT_AVAILABLE == pending_count.status()) {
+            return make_unexpected(HAILO_NOT_AVAILABLE);
+        }
+        CHECK_EXPECTED(pending_count);
+        accumulated_count += pending_count.value();
+    }
+
+    return accumulated_count;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_stream.hpp
+ * @brief Internal stream implementation for VDevice
+ *
+ * InputStream (External "interface")
+ * |-- InputStreamBase (Base class)
+ * |-- InputVDeviceBaseStream (Base class for vdevice streams)
+ * | |-- InputVDeviceNativeStream
+ * | |-- ScheduledInputStream
+ *
+ * OutputStream (External "interface")
+ * |-- OutputStreamBase (Base class)
+ * |-- OutputVDeviceBaseStream (Base class for vdevice streams)
+ * | |-- OutputVDeviceNativeStream
+ * | |-- ScheduledOutputStream
+ **/
+
+#ifndef HAILO_VDEVICE_STREAM_HPP_
+#define HAILO_VDEVICE_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "vdevice/vdevice_internal.hpp"
+#include "vdma/vdma_device.hpp"
+#include "vdma/vdma_stream.hpp"
+#include "stream_common/stream_internal.hpp"
+
+
+namespace hailort
+{
+
+// Base class for vdevice input streams: fans one logical input stream out to
+// one VdmaInputStream per physical device. Concrete subclasses decide how
+// writes are routed (scheduler-driven vs. native round-robin).
+class InputVDeviceBaseStream : public InputStreamBase {
+
+public:
+    // Factory: picks the concrete subclass based on scheduler presence and device count.
+    static Expected<std::unique_ptr<InputVDeviceBaseStream>> create(std::vector<std::reference_wrapper<VdmaInputStream>> &&low_level_streams,
+        const LayerInfo &edge_layer, const scheduler_core_op_handle_t &core_op_handle,
+        EventPtr core_op_activated_event, CoreOpsSchedulerWeakPtr core_ops_scheduler);
+
+    virtual ~InputVDeviceBaseStream();
+
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+
+    virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual Expected<size_t> get_pending_frames_count() const override;
+    virtual bool is_scheduled() override = 0;
+    virtual hailo_status abort() override = 0;
+    virtual hailo_status clear_abort() override = 0;
+
+    // Registers the callback on every underlying stream; fails fast on the first error.
+    virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback) override
+    {
+        for (auto &stream : m_streams) {
+            auto status = stream.get().register_interrupt_callback(callback);
+            CHECK_SUCCESS(status);
+        }
+        return HAILO_SUCCESS;
+    }
+
+    // No-op here; scheduled subclasses use it to wake threads blocked on writes.
+    virtual void notify_all()
+    {
+        // Overriden in scheduled_stream
+        return;
+    }
+
+protected:
+    virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+    // Non-cancellable write: delegates with a never-cancel predicate.
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override
+    {
+        return sync_write_raw_buffer(buffer, []() { return false; });
+    }
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel) = 0;
+
+    // 'status' is an out-param: construction errors are reported through it.
+    explicit InputVDeviceBaseStream(
+        std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
+        EventPtr &&core_op_activated_event,
+        const LayerInfo &layer_info,
+        hailo_status &status) :
+        InputStreamBase(layer_info, streams[0].get().get_interface(), std::move(core_op_activated_event), status),
+        m_streams(std::move(streams)),
+        m_is_stream_activated(false),
+        m_next_transfer_stream_index(0),
+        m_acc_frames(0)
+    {}
+
+    std::vector<std::reference_wrapper<VdmaInputStream>> m_streams; // one per physical device
+    bool m_is_stream_activated;
+    uint32_t m_next_transfer_stream_index; // round-robin cursor over m_streams
+    uint32_t m_acc_frames; // frames written to the current stream within the current batch
+
+private:
+    friend class VDeviceInputStreamMultiplexerWrapper;
+
+    virtual hailo_status flush() override;
+};
+
+// Base class for vdevice output streams: fans one logical output stream out to
+// one VdmaOutputStream per physical device. Concrete subclasses decide how
+// reads are routed (scheduler-driven vs. native round-robin).
+class OutputVDeviceBaseStream : public OutputStreamBase {
+public:
+    virtual ~OutputVDeviceBaseStream();
+
+    // Factory: picks the concrete subclass based on scheduler presence.
+    static Expected<std::unique_ptr<OutputVDeviceBaseStream>> create(std::vector<std::reference_wrapper<VdmaOutputStream>> &&low_level_streams,
+        const LayerInfo &edge_layer, const scheduler_core_op_handle_t &core_op_handle,
+        EventPtr core_op_activated_event, CoreOpsSchedulerWeakPtr core_ops_scheduler);
+
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual Expected<size_t> get_pending_frames_count() const override;
+    virtual hailo_status abort() override = 0;
+    virtual hailo_status clear_abort() override = 0;
+    virtual bool is_scheduled() override = 0;
+
+    // Registers the callback on every underlying stream; fails fast on the first error.
+    virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback) override
+    {
+        for (auto &stream : m_streams) {
+            auto status = stream.get().register_interrupt_callback(callback);
+            CHECK_SUCCESS(status);
+        }
+        return HAILO_SUCCESS;
+    }
+
+protected:
+    // Not supported in the vdevice flow (returns HAILO_INTERNAL_FAILURE in the .cpp).
+    virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer) override;
+
+    // 'status' is an out-param: construction errors are reported through it.
+    explicit OutputVDeviceBaseStream(
+        std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
+        const LayerInfo &layer_info,
+        EventPtr &&core_op_activated_event,
+        hailo_status &status) :
+        OutputStreamBase(layer_info, std::move(core_op_activated_event), status),
+        m_streams(std::move(streams)),
+        m_is_stream_activated(false),
+        m_next_transfer_stream_index(0),
+        m_acc_frames(0)
+    {}
+
+    // Not supported in the vdevice flow (returns HAILO_INTERNAL_FAILURE in the .cpp).
+    virtual hailo_status read_all(MemoryView &buffer) override;
+
+    std::vector<std::reference_wrapper<VdmaOutputStream>> m_streams; // one per physical device
+    bool m_is_stream_activated;
+    uint32_t m_next_transfer_stream_index; // round-robin cursor over m_streams
+    uint32_t m_acc_frames; // frames read from the current stream within the current batch
+
+private:
+    friend class VDeviceOutputStreamMultiplexerWrapper;
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_VDEVICE_STREAM_HPP_ */
--- /dev/null
+#include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
+
+namespace hailort
+{
+
+const hailo_stream_info_t &VDeviceInputStreamMultiplexerWrapper::get_info() const
+{
+    // Thin delegation to the wrapped vdevice stream.
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_info();
+}
+
+const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceInputStreamMultiplexerWrapper::get_nn_stream_config()
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_nn_stream_config();
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::deactivate_stream()
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.deactivate_stream();
+}
+
+hailo_stream_interface_t VDeviceInputStreamMultiplexerWrapper::get_interface() const
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_interface();
+}
+
+std::chrono::milliseconds VDeviceInputStreamMultiplexerWrapper::get_timeout() const
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_timeout();
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::abort()
+{
+    // Idempotent: a second abort while already aborted is a no-op.
+    if (*m_is_aborted) {
+        return HAILO_SUCCESS;
+    }
+    *m_is_aborted = true;
+
+    if (is_scheduled()) {
+        // Scheduled mode: stop routing this wrapper's traffic through the multiplexer,
+        // wake any waiters, then let exactly one wrapper run the underlying abort.
+        auto status = m_multiplexer->disable_stream(m_core_op_multiplexer_handle, name());
+        CHECK_SUCCESS(status);
+
+        m_vdevice_input_stream->notify_all();
+
+        status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__ABORT, m_core_op_multiplexer_handle);
+        CHECK_SUCCESS(status);
+
+        return HAILO_SUCCESS;
+    }
+
+    // Non-scheduled mode: abort the wrapped stream directly.
+    auto status = m_vdevice_input_stream->abort();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::clear_abort()
+{
+    // Idempotent: clearing when not aborted is a no-op.
+    if (!(*m_is_aborted)) {
+        return HAILO_SUCCESS;
+    }
+    *m_is_aborted = false;
+
+    if (is_scheduled()) {
+        // Scheduled mode: re-enable this wrapper in the multiplexer, let exactly
+        // one wrapper run the underlying clear_abort, then wake any waiters.
+        auto status = m_multiplexer->enable_stream(m_core_op_multiplexer_handle, name());
+        CHECK_SUCCESS(status);
+
+        status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, m_core_op_multiplexer_handle);
+        CHECK_SUCCESS(status);
+
+        m_vdevice_input_stream->notify_all();
+
+        return HAILO_SUCCESS;
+    }
+
+    // Non-scheduled mode: clear the wrapped stream directly.
+    auto status = m_vdevice_input_stream->clear_abort();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+bool VDeviceInputStreamMultiplexerWrapper::is_scheduled()
+{
+    // Thin delegation to the wrapped vdevice stream.
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.is_scheduled();
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::send_pending_buffer(size_t device_index)
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.send_pending_buffer(device_index);
+}
+
+Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_buffer_frames_size() const
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_buffer_frames_size();
+}
+
+Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_pending_frames_count() const
+{
+    auto &wrapped = *m_vdevice_input_stream;
+    return wrapped.get_pending_frames_count();
+}
+
+// Write path: in scheduled mode, coordinates with the multiplexer before and
+// after delegating to the wrapped stream; the wrapped write may be cancelled
+// by this wrapper's abort flag.
+Expected<size_t> VDeviceInputStreamMultiplexerWrapper::sync_write_raw_buffer(const MemoryView &buffer)
+{
+    if (is_scheduled()) {
+        auto status = m_multiplexer->wait_for_write(m_core_op_multiplexer_handle);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            return make_unexpected(status);
+        }
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
+    auto exp = m_vdevice_input_stream->sync_write_raw_buffer(buffer, [this]() { return m_is_aborted->load(); });
+    // Always notify the multiplexer - even on failure - so its bookkeeping stays consistent.
+    if (is_scheduled()) {
+        auto status = m_multiplexer->signal_write_finish(m_core_op_multiplexer_handle, exp.status() != HAILO_SUCCESS);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == exp.status()) {
+        return make_unexpected(exp.status());
+    }
+    CHECK_EXPECTED(exp);
+
+    return exp;
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
+{
+    // View the requested [offset, offset + size) window and forward it as a raw write.
+    ASSERT(NULL != buffer);
+
+    auto *window_start = static_cast<uint8_t*>(buffer) + offset;
+    return sync_write_raw_buffer(MemoryView(window_start, size)).status();
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
+{
+    // Thin delegation to the wrapped vdevice stream.
+    return m_vdevice_input_stream->set_timeout(timeout);
+}
+
+hailo_status VDeviceInputStreamMultiplexerWrapper::flush()
+{
+    // Non-scheduled mode flushes directly; scheduled mode lets exactly one
+    // multiplexer client run the underlying flush.
+    if (!is_scheduled()) {
+        return m_vdevice_input_stream->flush();
+    }
+
+    auto status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__FLUSH, m_core_op_multiplexer_handle);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Factory: wraps a vdevice input stream with multiplexer bookkeeping.
+// The private constructor reports failures through its 'status' out-param.
+Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::create(std::shared_ptr<InputVDeviceBaseStream> vdevice_input_stream,
+    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    multiplexer_core_op_handle_t core_op_multiplexer_handle)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    std::unique_ptr<VDeviceInputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceInputStreamMultiplexerWrapper(vdevice_input_stream, network_name, multiplexer,
+        core_ops_scheduler_handle, core_op_multiplexer_handle, status));
+    CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return wrapper;
+}
+
+// Creates another wrapper over the same underlying stream for a different
+// multiplexer client (core-op handle).
+Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::clone(multiplexer_core_op_handle_t core_op_multiplexer_handle)
+{
+    auto wrapper = create(m_vdevice_input_stream, m_network_name, m_multiplexer, m_core_ops_scheduler_handle, core_op_multiplexer_handle);
+    CHECK_EXPECTED(wrapper);
+
+    return wrapper;
+}
+
+// Private constructor; reports failures through the 'status' out-param.
+// Registers the flush/abort/clear-abort "run once" callbacks so that, with many
+// multiplexer clients sharing the stream, each operation runs exactly once.
+VDeviceInputStreamMultiplexerWrapper::VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<InputVDeviceBaseStream> &vdevice_input_stream,
+    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status) :
+    InputStreamBase(vdevice_input_stream->get_info(),
+        vdevice_input_stream->m_nn_stream_config, vdevice_input_stream->get_core_op_activated_event()),
+    m_vdevice_input_stream(vdevice_input_stream),
+    m_multiplexer(multiplexer),
+    m_core_ops_scheduler_handle(core_ops_scheduler_handle),
+    m_core_op_multiplexer_handle(core_op_multiplexer_handle),
+    m_network_name(network_name),
+    m_is_aborted()
+{
+    // Heap allocation so clones can share the abort flag's lifetime semantics.
+    m_is_aborted = make_unique_nothrow<std::atomic_bool>(false);
+    if (nullptr == m_is_aborted) {
+        status = HAILO_OUT_OF_HOST_MEMORY;
+        LOGGER__ERROR("Failed to allocate memory! status = {}", status);
+        return;
+    }
+    // Run-once handler: flush the underlying stream a single time per group.
+    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__FLUSH, [this]
+    {
+        return m_vdevice_input_stream->flush();
+    });
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
+        return;
+    }
+
+    // Run-once handler: abort the underlying stream a single time per group.
+    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__ABORT, [this]
+    {
+        return m_vdevice_input_stream->abort();
+    });
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
+        return;
+    }
+
+    // Run-once handler: clear the underlying abort a single time per group.
+    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
+    {
+        return m_vdevice_input_stream->clear_abort();
+    });
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
+        return;
+    }
+}
+
+const hailo_stream_info_t &VDeviceOutputStreamMultiplexerWrapper::get_info() const
+{
+    // Thin delegation to the wrapped vdevice stream.
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.get_info();
+}
+
+const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceOutputStreamMultiplexerWrapper::get_nn_stream_config()
+{
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.get_nn_stream_config();
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::deactivate_stream()
+{
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.deactivate_stream();
+}
+
+hailo_stream_interface_t VDeviceOutputStreamMultiplexerWrapper::get_interface() const
+{
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.get_interface();
+}
+
+std::chrono::milliseconds VDeviceOutputStreamMultiplexerWrapper::get_timeout() const
+{
+    auto &wrapped = *m_vdevice_output_stream;
+    return wrapped.get_timeout();
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::abort()
+{
+    // Idempotent: a second abort while already aborted is a no-op.
+    if (*m_is_aborted) {
+        return HAILO_SUCCESS;
+    }
+    *m_is_aborted = true;
+
+    if (is_scheduled()) {
+        // Scheduled mode: stop routing this wrapper's traffic through the multiplexer,
+        // then let exactly one wrapper run the underlying abort.
+        auto status = m_multiplexer->disable_stream(m_core_op_multiplexer_handle, name());
+        CHECK_SUCCESS(status);
+
+        status = m_multiplexer->run_once_for_stream(name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, m_core_op_multiplexer_handle);
+        CHECK_SUCCESS(status);
+
+        return HAILO_SUCCESS;
+    }
+
+    // Non-scheduled mode: abort the wrapped stream directly.
+    auto status = m_vdevice_output_stream->abort();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::clear_abort()
+{
+    // Idempotent: clearing when not aborted is a no-op.
+    if (!(*m_is_aborted)) {
+        return HAILO_SUCCESS;
+    }
+    *m_is_aborted = false;
+
+    if (is_scheduled()) {
+        // Scheduled mode: re-enable this wrapper in the multiplexer, then let
+        // exactly one wrapper run the underlying clear_abort.
+        auto status = m_multiplexer->enable_stream(m_core_op_multiplexer_handle, name());
+        CHECK_SUCCESS(status);
+
+        status = m_multiplexer->run_once_for_stream(name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, m_core_op_multiplexer_handle);
+        CHECK_SUCCESS(status);
+
+        return HAILO_SUCCESS;
+    }
+
+    // Non-scheduled mode: clear the wrapped stream directly.
+    auto status = m_vdevice_output_stream->clear_abort();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+bool VDeviceOutputStreamMultiplexerWrapper::is_scheduled()
+{
+ return m_vdevice_output_stream->is_scheduled();
+}
+
+Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_buffer_frames_size() const
+{
+ return m_vdevice_output_stream->get_buffer_frames_size();
+}
+Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_pending_frames_count() const
+{
+ return m_vdevice_output_stream->get_pending_frames_count();
+}
+
+Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::sync_read_raw_buffer(MemoryView &buffer)
+{
+ return m_vdevice_output_stream->sync_read_raw_buffer(buffer);
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::read_all(MemoryView &buffer)
+{
+ return m_vdevice_output_stream->read_all(buffer);
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::read(MemoryView buffer)
+{
+ uint32_t frames_to_drain_count = 0;
+ if (is_scheduled()) {
+ auto expected_drain_count = m_multiplexer->wait_for_read(m_core_op_multiplexer_handle, name(),
+ m_vdevice_output_stream->get_timeout());
+ if (HAILO_STREAM_ABORTED_BY_USER == expected_drain_count.status()) {
+ return expected_drain_count.status();
+ }
+ CHECK_EXPECTED_AS_STATUS(expected_drain_count);
+
+ frames_to_drain_count = expected_drain_count.release();
+ }
+
+ for (uint32_t i = 0; i < frames_to_drain_count; i++) {
+ auto status = m_vdevice_output_stream->read(buffer);
+ if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+ return status;
+ }
+ CHECK_SUCCESS(status);
+ }
+
+ auto status = m_vdevice_output_stream->read(buffer);
+ if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+ return status;
+ }
+ CHECK_SUCCESS(status);
+
+ if (is_scheduled()) {
+ status = m_multiplexer->signal_read_finish();
+ CHECK_SUCCESS(status);
+ }
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceOutputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
+{
+ return m_vdevice_output_stream->set_timeout(timeout);
+}
+
+Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::create(std::shared_ptr<OutputVDeviceBaseStream> vdevice_output_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle)
+{
+ hailo_status status = HAILO_UNINITIALIZED;
+ std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceOutputStreamMultiplexerWrapper(vdevice_output_stream, network_name, multiplexer,
+ core_ops_scheduler_handle, core_op_multiplexer_handle, status));
+    CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return wrapper;
+}
+
+Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::clone(multiplexer_core_op_handle_t core_op_multiplexer_handle)
+{
+ auto wrapper = create(m_vdevice_output_stream, m_network_name, m_multiplexer, m_core_ops_scheduler_handle, core_op_multiplexer_handle);
+ CHECK_EXPECTED(wrapper);
+
+ return wrapper;
+}
+
+VDeviceOutputStreamMultiplexerWrapper::VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<OutputVDeviceBaseStream> &vdevice_output_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status) :
+ OutputStreamBase(vdevice_output_stream->get_layer_info(), vdevice_output_stream->get_info(),
+ vdevice_output_stream->m_nn_stream_config, vdevice_output_stream->get_core_op_activated_event()),
+ m_vdevice_output_stream(vdevice_output_stream),
+ m_multiplexer(multiplexer),
+ m_core_ops_scheduler_handle(core_ops_scheduler_handle),
+ m_core_op_multiplexer_handle(core_op_multiplexer_handle),
+ m_network_name(network_name),
+ m_is_aborted()
+{
+ m_is_aborted = make_unique_nothrow<std::atomic_bool>(false);
+ if (nullptr == m_is_aborted) {
+ status = HAILO_OUT_OF_HOST_MEMORY;
+ LOGGER__ERROR("Failed to allocate memory! status = {}", status);
+ return;
+ }
+
+ status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, [this]
+ {
+ return m_vdevice_output_stream->abort();
+ });
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
+ return;
+ }
+
+ status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
+ {
+ return m_vdevice_output_stream->clear_abort();
+ });
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
+ return;
+ }
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdevice_stream_multiplexer_wrapper.hpp
+ * @brief Wrapper classes for VDeviceInputStream and VDeviceOutputStream
+ **/
+
+#ifndef HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_
+#define HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_
+
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdevice/vdevice_stream.hpp"
+#include "vdevice/pipeline_multiplexer.hpp"
+
+
+namespace hailort
+{
+
+enum input_run_once_handle_t {
+ INPUT_RUN_ONCE_HANDLE__FLUSH,
+ INPUT_RUN_ONCE_HANDLE__ABORT,
+ INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT
+};
+
+enum output_run_once_handle_t {
+ OUTPUT_RUN_ONCE_HANDLE__ABORT,
+ OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT
+};
+
+class VDeviceInputStreamMultiplexerWrapper : public InputStreamBase {
+public:
+ virtual ~VDeviceInputStreamMultiplexerWrapper() = default;
+ static Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> create(std::shared_ptr<InputVDeviceBaseStream> vdevice_input_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle = 0);
+ Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> clone(multiplexer_core_op_handle_t core_op_multiplexer_handle);
+
+ virtual const hailo_stream_info_t &get_info() const override;
+ virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
+ virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+ virtual hailo_status deactivate_stream() override;
+ virtual hailo_stream_interface_t get_interface() const override;
+ virtual std::chrono::milliseconds get_timeout() const override;
+ virtual hailo_status abort() override;
+ virtual hailo_status clear_abort() override;
+ virtual bool is_scheduled() override;
+
+ virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
+ virtual Expected<size_t> get_buffer_frames_size() const override;
+ virtual Expected<size_t> get_pending_frames_count() const override;
+
+ virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback) override
+ {
+ return m_vdevice_input_stream->register_interrupt_callback(callback);
+ }
+
+protected:
+ virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
+ virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+
+private:
+ VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<InputVDeviceBaseStream> &vdevice_input_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status);
+
+ virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+ virtual hailo_status flush() override;
+
+ std::shared_ptr<InputVDeviceBaseStream> m_vdevice_input_stream;
+ std::shared_ptr<PipelineMultiplexer> m_multiplexer;
+ scheduler_core_op_handle_t m_core_ops_scheduler_handle;
+ multiplexer_core_op_handle_t m_core_op_multiplexer_handle;
+ std::string m_network_name;
+
+ std::unique_ptr<std::atomic_bool> m_is_aborted;
+};
+
+class VDeviceOutputStreamMultiplexerWrapper : public OutputStreamBase {
+public:
+ virtual ~VDeviceOutputStreamMultiplexerWrapper() noexcept = default;
+
+ static Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> create(std::shared_ptr<OutputVDeviceBaseStream> vdevice_output_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle = 0);
+ Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> clone(multiplexer_core_op_handle_t core_op_multiplexer_handle);
+
+ virtual const hailo_stream_info_t &get_info() const override;
+ virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
+ virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+ virtual hailo_status deactivate_stream() override;
+ virtual hailo_stream_interface_t get_interface() const override;
+ virtual std::chrono::milliseconds get_timeout() const override;
+ virtual hailo_status abort() override;
+ virtual hailo_status clear_abort() override;
+ virtual bool is_scheduled() override;
+ virtual Expected<size_t> get_buffer_frames_size() const override;
+ virtual Expected<size_t> get_pending_frames_count() const override;
+
+ virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback) override
+ {
+ return m_vdevice_output_stream->register_interrupt_callback(callback);
+ }
+
+protected:
+ virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer) override;
+
+private:
+ VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<OutputVDeviceBaseStream> &vdevice_output_stream,
+ std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+ multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status);
+
+ virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+ virtual hailo_status read_all(MemoryView &buffer) override;
+ virtual hailo_status read(MemoryView buffer) override;
+
+ std::shared_ptr<OutputVDeviceBaseStream> m_vdevice_output_stream;
+ std::shared_ptr<PipelineMultiplexer> m_multiplexer;
+ scheduler_core_op_handle_t m_core_ops_scheduler_handle;
+ multiplexer_core_op_handle_t m_core_op_multiplexer_handle;
+ std::string m_network_name;
+ EventPtr m_read_event;
+
+ std::unique_ptr<std::atomic_bool> m_is_aborted;
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_internal.hpp
- * @brief Class declaration for VDeviceBase that implements the basic VDevice "interface".
- * Hence, the hiearchy is as follows:
- *
- * VDevice (External "interface")
- * |
- * |-- VDeviceHandle (VDevice handle for a possibly shared VDeviceBase
- * | when hailort is running as single process)
- * |-- VDeviceClient (VDevice client for a possibly shared VDeviceBase
- * | when hailort is running as a service)
- * |-- VDeviceBase (Actual implementations)
- * |
- * |-- std::vector<VdmaDevice>
- **/
-
-#ifndef _HAILO_VDEVICE_INTERNAL_HPP_
-#define _HAILO_VDEVICE_INTERNAL_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/vdevice.hpp"
-#include "vdma_device.hpp"
-#include "context_switch/multi_context/vdma_config_manager.hpp"
-#include "context_switch/vdevice_network_group.hpp"
-#include "network_group_scheduler.hpp"
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#include "hailort_rpc_client.hpp"
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-namespace hailort
-{
-
-
-class VDeviceBase : public VDevice
-{
-public:
- static Expected<std::unique_ptr<VDeviceBase>> create(const hailo_vdevice_params_t ¶ms);
- VDeviceBase(VDeviceBase &&) = delete;
- VDeviceBase(const VDeviceBase &) = delete;
- VDeviceBase &operator=(VDeviceBase &&) = delete;
- VDeviceBase &operator=(const VDeviceBase &) = delete;
- virtual ~VDeviceBase() = default;
-
- virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params={}) override;
-
- virtual Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override
- {
- // Return Expected for future functionality
- std::vector<std::reference_wrapper<Device>> devices_refs;
- for (auto &device : m_devices) {
- devices_refs.push_back(*device);
- }
- return devices_refs;
- }
-
- virtual Expected<std::vector<std::string>> get_physical_devices_ids() const override
- {
- std::vector<std::string> device_ids;
- device_ids.reserve(m_devices.size());
- for (auto &device : m_devices) {
- device_ids.push_back(device.get()->get_dev_id());
- }
- return device_ids;
- }
-
- const NetworkGroupSchedulerPtr &network_group_scheduler()
- {
- return m_network_group_scheduler;
- }
-
- // Currently only homogeneous vDevice is allow (= all devices are from the same type)
- virtual Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
-
- // TODO: Remove when feature becomes 'released'
- static bool enable_multi_device_schedeulr()
- {
- auto enable_multi_device_schedeulr_env = std::getenv(HAILO_ENABLE_MULTI_DEVICE_SCHEDULER);
- return ((nullptr != enable_multi_device_schedeulr_env) &&
- (strnlen(enable_multi_device_schedeulr_env, 2) == 1) && (strncmp(enable_multi_device_schedeulr_env, "1", 1) == 0));
- }
-
-private:
- VDeviceBase(std::vector<std::unique_ptr<VdmaDevice>> &&devices, NetworkGroupSchedulerPtr network_group_scheduler) :
- m_devices(std::move(devices)), m_network_group_scheduler(network_group_scheduler), m_network_groups({})
- {}
-
- static Expected<std::vector<std::unique_ptr<VdmaDevice>>> create_devices(const hailo_vdevice_params_t ¶ms);
- static Expected<std::vector<std::string>> get_device_ids(const hailo_vdevice_params_t ¶ms);
-
- std::vector<std::unique_ptr<VdmaDevice>> m_devices;
- NetworkGroupSchedulerPtr m_network_group_scheduler;
- std::vector<std::shared_ptr<VDeviceNetworkGroup>> m_network_groups;
-
- std::mutex m_mutex;
-};
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-class VDeviceClient : public VDevice
-{
-public:
- static Expected<std::unique_ptr<VDevice>> create(const hailo_vdevice_params_t ¶ms);
-
- VDeviceClient(VDeviceClient &&) = delete;
- VDeviceClient(const VDeviceClient &) = delete;
- VDeviceClient &operator=(VDeviceClient &&) = delete;
- VDeviceClient &operator=(const VDeviceClient &) = delete;
- virtual ~VDeviceClient();
-
- Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params={}) override;
-
- Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override;
-
- Expected<std::vector<std::string>> get_physical_devices_ids() const override;
- Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
-
-private:
- VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle);
-
- std::unique_ptr<HailoRtRpcClient> m_client;
- uint32_t m_handle;
-};
-
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-class VDeviceHandle : public VDevice
-{
-public:
- static Expected<std::unique_ptr<VDevice>> create(const hailo_vdevice_params_t ¶ms);
-
- VDeviceHandle(VDeviceHandle &&) = delete;
- VDeviceHandle(const VDeviceHandle &) = delete;
- VDeviceHandle &operator=(VDeviceHandle &&) = delete;
- VDeviceHandle &operator=(const VDeviceHandle &) = delete;
- virtual ~VDeviceHandle();
-
- Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
- const NetworkGroupsParamsMap &configure_params={}) override;
-
- Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override;
- Expected<std::vector<std::string>> get_physical_devices_ids() const override;
- Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
-
-private:
- VDeviceHandle(uint32_t handle);
- uint32_t m_handle;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_DEVICE_INTERNAL_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_native_stream.hpp
- * @brief Internal stream implementation for native streams
- *
- **/
-
-#ifndef HAILO_VDEVICE_NATIVE_STREAM_HPP_
-#define HAILO_VDEVICE_NATIVE_STREAM_HPP_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "vdevice_stream.hpp"
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-
-class InputVDeviceNativeStream : public InputVDeviceBaseStream {
-public:
- InputVDeviceNativeStream(InputVDeviceNativeStream &&other) :
- InputVDeviceBaseStream(std::move(other))
- {}
-
- explicit InputVDeviceNativeStream(
- std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
- EventPtr &&network_group_activated_event,
- const LayerInfo &layer_info,
- hailo_status &status) :
- InputVDeviceBaseStream(std::move(streams), std::move(network_group_activated_event), layer_info, status)
- {}
-
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override { return false; };
-
-protected:
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer,
- const std::function<bool()> &should_cancel = []() { return false; }) override;
-};
-
-class OutputVDeviceNativeStream : public OutputVDeviceBaseStream {
-public:
- OutputVDeviceNativeStream(OutputVDeviceNativeStream &&other) :
- OutputVDeviceBaseStream(std::move(other))
- {}
-
- explicit OutputVDeviceNativeStream(
- std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
- const LayerInfo &layer_info,
- EventPtr &&network_group_activated_event,
- hailo_status &status) :
- OutputVDeviceBaseStream(std::move(streams), layer_info, std::move(network_group_activated_event), status)
- {}
-
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override { return false; };
-
-protected:
- virtual hailo_status read(MemoryView buffer) override;;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDEVICE_NATIVE_STREAM_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_stream.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include <new>
-
-#include "hailo/hailort.h"
-#include "common/utils.hpp"
-#include "hailo/stream.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/hailort_common.hpp"
-#include "tracer_macros.hpp"
-#include "vdevice_stream.hpp"
-#include "scheduled_stream.hpp"
-#include "vdevice_native_stream.hpp"
-#include "context_switch/multi_context/resource_manager.hpp"
-#include "multi_device_scheduled_stream.hpp"
-
-namespace hailort
-{
-
-hailo_status InputVDeviceBaseStream::deactivate_stream()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto deactivate_status = stream.get().deactivate_stream();
- if (HAILO_SUCCESS != deactivate_status) {
- LOGGER__ERROR("Failed to deactivate input stream. (status: {} device: {})", deactivate_status, stream.get().get_dev_id());
- status = deactivate_status;
- }
- }
- m_is_stream_activated = false;
- return status;
-}
-
-/** Input stream **/
-InputVDeviceBaseStream::~InputVDeviceBaseStream()
-{
- // We want to stop the vdma channel before closing the stream in the firmware
- // because sending data to a closed stream may terminate the dma engine
- if (m_is_stream_activated) {
- (void)deactivate_stream();
- }
-}
-
-hailo_status InputVDeviceBaseStream::activate_stream(uint16_t dynamic_batch_size)
-{
- for (auto &stream : m_streams) {
- auto status = stream.get().activate_stream(dynamic_batch_size);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to activate input stream. (device: {})", stream.get().get_dev_id());
- deactivate_stream();
- return status;
- }
- }
- m_is_stream_activated = true;
- return HAILO_SUCCESS;
-}
-
-hailo_status InputVDeviceBaseStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
-{
- ASSERT(NULL != buffer);
-
- return sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, size)).status();
-}
-
-hailo_status InputVDeviceBaseStream::send_pending_buffer(size_t device_index)
-{
- assert(1 == m_streams.size());
- CHECK(0 == device_index, HAILO_INVALID_OPERATION);
- VdmaInputStream &vdma_input = static_cast<VdmaInputStream&>(m_streams[m_next_transfer_stream_index].get());
- return vdma_input.send_pending_buffer();
-}
-
-Expected<size_t> InputVDeviceBaseStream::get_buffer_frames_size() const
-{
- size_t total_buffers_size = 0;
- for (auto &stream : m_streams) {
- auto stream_buffer_size = stream.get().get_buffer_frames_size();
- CHECK_EXPECTED(stream_buffer_size);
- total_buffers_size += stream_buffer_size.value();
- }
-
- return total_buffers_size;
-}
-
-Expected<size_t> InputVDeviceBaseStream::get_pending_frames_count() const
-{
- size_t total_pending_frames_count = 0;
- for (auto &stream : m_streams) {
- auto stream_pending_frames_count = stream.get().get_pending_frames_count();
- CHECK_EXPECTED(stream_pending_frames_count);
- total_pending_frames_count += stream_pending_frames_count.value();
- }
-
- return total_pending_frames_count;
-}
-
-Expected<std::unique_ptr<InputVDeviceBaseStream>> InputVDeviceBaseStream::create(std::vector<std::reference_wrapper<VdmaInputStream>> &&low_level_streams,
- const LayerInfo &edge_layer, const scheduler_ng_handle_t &network_group_handle,
- EventPtr network_group_activated_event, NetworkGroupSchedulerWeakPtr network_group_scheduler)
-{
- assert(0 < low_level_streams.size());
- auto status = HAILO_UNINITIALIZED;
-
- std::unique_ptr<InputVDeviceBaseStream> local_vdevice_stream;
-
- if (network_group_scheduler.lock()) {
- if (1 < low_level_streams.size()) {
- const auto batch_size = low_level_streams[0].get().get_dynamic_batch_size();
- auto frame_size = low_level_streams[0].get().get_frame_size();
- auto buffers_queue_ptr = BuffersQueue::create_unique(frame_size, (low_level_streams.size() * batch_size));
- CHECK_EXPECTED(buffers_queue_ptr);
-
- local_vdevice_stream = make_unique_nothrow<MultiDeviceScheduledInputStream>(std::move(low_level_streams),
- network_group_handle, std::move(network_group_activated_event), edge_layer,
- network_group_scheduler, buffers_queue_ptr.release(), status);
- } else {
- local_vdevice_stream = make_unique_nothrow<ScheduledInputStream>(std::move(low_level_streams),
- network_group_handle, std::move(network_group_activated_event), edge_layer,
- network_group_scheduler, status);
- }
- } else {
- local_vdevice_stream = make_unique_nothrow<InputVDeviceNativeStream>(std::move(low_level_streams),
- std::move(network_group_activated_event), edge_layer,status);
- }
-
- CHECK_AS_EXPECTED((nullptr != local_vdevice_stream), HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_vdevice_stream;
-}
-
-hailo_status InputVDeviceBaseStream::set_timeout(std::chrono::milliseconds timeout)
-{
- for (auto &stream : m_streams) {
- auto status = stream.get().set_timeout(timeout);
- CHECK_SUCCESS(status, "Failed to set timeout to input stream. (device: {})", stream.get().get_dev_id());
- }
- return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds InputVDeviceBaseStream::get_timeout() const
-{
- // All timeout values of m_streams should be the same
- return m_streams[0].get().get_timeout();
-}
-
-hailo_stream_interface_t InputVDeviceBaseStream::get_interface() const
-{
- // All interface values of m_streams should be the same
- return m_streams[0].get().get_interface();
-}
-
-hailo_status InputVDeviceBaseStream::flush()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto flush_status = stream.get().flush();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to flush input stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = flush_status;
- }
- }
- return status;
-}
-
-Expected<size_t> ScheduledInputStream::sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel)
-{
- return sync_write_raw_buffer_impl(buffer, m_network_group_handle, should_cancel);
-}
-
-Expected<size_t> InputVDeviceNativeStream::sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel)
-{
- if (should_cancel()) {
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
-
- auto expected_written_bytes = m_streams[m_next_transfer_stream_index].get().sync_write_raw_buffer(buffer);
- if (HAILO_SUCCESS != expected_written_bytes.status()) {
- LOGGER__INFO("Write to stream has failed! status = {}", expected_written_bytes.status());
- return make_unexpected(expected_written_bytes.status());
- }
- auto written_bytes = expected_written_bytes.value();
-
- // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
- if (0 == (++m_acc_frames % m_streams[0].get().get_dynamic_batch_size())) {
- m_next_transfer_stream_index = static_cast<uint32_t>((m_next_transfer_stream_index + 1) % m_streams.size());
- m_acc_frames = 0;
- }
- return written_bytes;
-}
-
-Expected<size_t> ScheduledInputStream::sync_write_raw_buffer_impl(const MemoryView &buffer, scheduler_ng_handle_t network_group_handle,
- const std::function<bool()> &should_cancel)
-{
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK_AS_EXPECTED(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto status = network_group_scheduler->wait_for_write(network_group_handle, name(), get_timeout(), should_cancel);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Write to stream was aborted.");
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- TRACE(WriteFrameTrace, "", network_group_handle, m_stream_info.name);
-
- assert(1 == m_streams.size());
- status = m_streams[0].get().write_buffer_only(buffer, should_cancel);
- if (HAILO_SUCCESS != status) {
- LOGGER__INFO("Write to stream has failed! status = {}", status);
- network_group_scheduler->mark_failed_write(network_group_handle, name());
- return make_unexpected(status);
- }
-
- status = network_group_scheduler->signal_write_finish(network_group_handle, name());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- auto written_bytes = buffer.size();
-
- return written_bytes;
-}
-
-hailo_status ScheduledInputStream::abort()
-{
- return abort_impl(m_network_group_handle);
-}
-
-hailo_status InputVDeviceNativeStream::abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto abort_status = stream.get().abort();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = abort_status;
- }
- }
-
- return status;
-}
-
-hailo_status ScheduledInputStream::abort_impl(scheduler_ng_handle_t network_group_handle)
-{
- auto status = HAILO_SUCCESS; // Best effort
- assert(1 == m_streams.size());
- auto abort_status = m_streams[0].get().abort();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, m_streams[0].get().get_dev_id());
- status = abort_status;
- }
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto disable_status = network_group_scheduler->disable_stream(network_group_handle, name());
- if (HAILO_SUCCESS != disable_status) {
- LOGGER__ERROR("Failed to disable stream in the network group scheduler. (status: {})", disable_status);
- status = disable_status;
- }
-
- return status;
-}
-
-hailo_status ScheduledInputStream::clear_abort()
-{
- return clear_abort_impl(m_network_group_handle);
-}
-
-hailo_status InputVDeviceNativeStream::clear_abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto clear_abort_status = stream.get().clear_abort();
- if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
- LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
- status = clear_abort_status;
- }
- }
-
- return status;
-}
-
-hailo_status ScheduledInputStream::clear_abort_impl(scheduler_ng_handle_t network_group_handle)
-{
- auto status = HAILO_SUCCESS; // Best effort
- assert(1 == m_streams.size());
- auto clear_abort_status = m_streams[0].get().clear_abort();
- if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
- LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, m_streams[0].get().get_dev_id());
- status = clear_abort_status;
- }
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto enable_status = network_group_scheduler->enable_stream(network_group_handle, name());
- if (HAILO_SUCCESS != enable_status) {
- LOGGER__ERROR("Failed to enable stream in the network group scheduler. (status: {})", enable_status);
- status = enable_status;
- }
-
- return status;
-}
-
-/** Output stream **/
-hailo_status OutputVDeviceBaseStream::deactivate_stream()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto deactivate_status = stream.get().deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to deactivate output stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = deactivate_status;
- }
- }
- m_is_stream_activated = false;
- return status;
-}
-
-OutputVDeviceBaseStream::~OutputVDeviceBaseStream()
-{
- // We want to stop the vdma channel before closing the stream in the firmware
- // because sending data to a closed stream may terminate the dma engine
- if (m_is_stream_activated) {
- (void)deactivate_stream();
- }
-}
-
-hailo_status OutputVDeviceBaseStream::activate_stream(uint16_t dynamic_batch_size)
-{
- for (auto &stream : m_streams) {
- auto status = stream.get().activate_stream(dynamic_batch_size);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to activate output stream. (device: {})", stream.get().get_dev_id());
- deactivate_stream();
- return status;
- }
- }
- m_is_stream_activated = true;
- return HAILO_SUCCESS;
-}
-
-hailo_status OutputVDeviceBaseStream::read_all(MemoryView &/*buffer*/)
-{
- LOGGER__ERROR("read_all should not be called in vdevice flow");
- return HAILO_INTERNAL_FAILURE;
-}
-
-Expected<size_t> OutputVDeviceBaseStream::sync_read_raw_buffer(MemoryView &/*buffer*/)
-{
- LOGGER__ERROR("sync_read_raw_buffer should not be called in vdevice flow");
- return make_unexpected(HAILO_INTERNAL_FAILURE);
-}
-
-hailo_status ScheduledOutputStream::read(MemoryView buffer)
-{
- return read_impl(buffer, m_network_group_handle);
-}
-
-hailo_status OutputVDeviceNativeStream::read(MemoryView buffer)
-{
- auto status = m_streams[m_next_transfer_stream_index].get().read(buffer);
- if (HAILO_SUCCESS != status) {
- LOGGER__INFO("Read from stream has failed! status = {}", status);
- return status;
- }
-
- // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
- if (0 == (++m_acc_frames % m_streams[0].get().get_dynamic_batch_size())) {
- m_next_transfer_stream_index = static_cast<uint32_t>((m_next_transfer_stream_index + 1) % m_streams.size());
- m_acc_frames = 0;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status ScheduledOutputStream::read_impl(MemoryView buffer, scheduler_ng_handle_t network_group_handle)
-{
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto device_id = network_group_scheduler->wait_for_read(network_group_handle, name(), get_timeout());
- if (HAILO_STREAM_ABORTED_BY_USER == device_id.status()) {
- LOGGER__INFO("Read from stream was aborted.");
- return device_id.status();
- }
- CHECK_EXPECTED_AS_STATUS(device_id);
-
- TRACE(ReadFrameTrace, "", network_group_handle, m_stream_info.name);
- auto status = m_streams[device_id.value()].get().read(buffer);
- if (HAILO_SUCCESS != status) {
- LOGGER__INFO("Read from stream has failed! status = {}", status);
- return status;
- }
-
- status = network_group_scheduler->signal_read_finish(network_group_handle, name(), device_id.value());
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<std::unique_ptr<OutputVDeviceBaseStream>> OutputVDeviceBaseStream::create(std::vector<std::reference_wrapper<VdmaOutputStream>> &&low_level_streams,
- const LayerInfo &edge_layer, const scheduler_ng_handle_t &network_group_handle, EventPtr network_group_activated_event,
- NetworkGroupSchedulerWeakPtr network_group_scheduler)
-{
- assert(0 < low_level_streams.size());
- auto status = HAILO_UNINITIALIZED;
-
- std::unique_ptr<OutputVDeviceBaseStream> local_vdevice_stream;
- if (network_group_scheduler.lock()) {
- local_vdevice_stream = make_unique_nothrow<ScheduledOutputStream>(std::move(low_level_streams), network_group_handle,
- edge_layer, std::move(network_group_activated_event), network_group_scheduler, status);
- } else {
- local_vdevice_stream = make_unique_nothrow<OutputVDeviceNativeStream>(std::move(low_level_streams), edge_layer,
- std::move(network_group_activated_event), status);
- }
-
- CHECK_AS_EXPECTED((nullptr != local_vdevice_stream), HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return local_vdevice_stream;
-}
-
-hailo_status OutputVDeviceBaseStream::set_timeout(std::chrono::milliseconds timeout)
-{
- for (auto &stream : m_streams) {
- auto status = stream.get().set_timeout(timeout);
- CHECK_SUCCESS(status, "Failed to set timeout to output stream. (device: {})", stream.get().get_dev_id());
- }
- return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds OutputVDeviceBaseStream::get_timeout() const
-{
- // All timeout values of m_streams should be the same
- return m_streams[0].get().get_timeout();
-}
-
-hailo_stream_interface_t OutputVDeviceBaseStream::get_interface() const
-{
- // All interface values of m_streams should be the same
- return m_streams[0].get().get_interface();
-}
-
-hailo_status ScheduledOutputStream::abort()
-{
- return abort_impl(m_network_group_handle);
-}
-
-hailo_status OutputVDeviceNativeStream::abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto abort_status = stream.get().abort();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = abort_status;
- }
- }
-
- return status;
-}
-
-hailo_status ScheduledOutputStream::abort_impl(scheduler_ng_handle_t network_group_handle)
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto& stream : m_streams) {
- auto abort_status = stream.get().abort();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", status, stream.get().get_dev_id());
- status = abort_status;
- }
- }
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto disable_status = network_group_scheduler->disable_stream(network_group_handle, name());
- if (HAILO_SUCCESS != disable_status) {
- LOGGER__ERROR("Failed to disable stream in the network group scheduler. (status: {})", disable_status);
- status = disable_status;
- }
-
- return status;
-}
-
-hailo_status ScheduledOutputStream::clear_abort()
-{
- return clear_abort_impl(m_network_group_handle);
-}
-
-hailo_status OutputVDeviceNativeStream::clear_abort()
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto &stream : m_streams) {
- auto clear_abort_status = stream.get().clear_abort();
- if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
- LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
- status = clear_abort_status;
- }
- }
-
- return status;
-}
-
-hailo_status ScheduledOutputStream::clear_abort_impl(scheduler_ng_handle_t network_group_handle)
-{
- auto status = HAILO_SUCCESS; // Best effort
- for (auto& stream : m_streams) {
- auto clear_abort_status = stream.get().clear_abort();
- if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
- LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
- status = clear_abort_status;
- }
- }
-
- auto network_group_scheduler = m_network_group_scheduler.lock();
- CHECK(network_group_scheduler, HAILO_INTERNAL_FAILURE);
-
- auto enable_status = network_group_scheduler->enable_stream(network_group_handle, name());
- if (HAILO_SUCCESS != enable_status) {
- LOGGER__ERROR("Failed to enable stream in the network group scheduler. (status: {})", enable_status);
- status = enable_status;
- }
-
- return status;
-}
-
-Expected<size_t> OutputVDeviceBaseStream::get_buffer_frames_size() const
-{
- size_t total_buffers_size = 0;
- for (auto &stream : m_streams) {
- auto stream_buffer_size = stream.get().get_buffer_frames_size();
- if (HAILO_NOT_AVAILABLE == stream_buffer_size.status()) {
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
- CHECK_EXPECTED(stream_buffer_size);
- total_buffers_size += stream_buffer_size.value();
- }
-
- return total_buffers_size;
-}
-
-Expected<size_t> OutputVDeviceBaseStream::get_pending_frames_count() const
-{
- size_t total_pending_frames_count = 0;
- for (auto &stream : m_streams) {
- auto stream_pending_frames_count = stream.get().get_pending_frames_count();
- if (HAILO_NOT_AVAILABLE == stream_pending_frames_count.status()) {
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
- CHECK_EXPECTED(stream_pending_frames_count);
- total_pending_frames_count += stream_pending_frames_count.value();
- }
-
- return total_pending_frames_count;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_stream.hpp
- * @brief Internal stream implementation for VDevice
- *
- * InputStream (External "interface")
- * |-- InputStreamBase (Base class)
- * |-- InputVDeviceBaseStream (Base class for vdevice streams)
- * | |-- InputVDeviceNativeStream
- * | |-- ScheduledInputStream
- *
- * OutputStream (External "interface")
- * |-- OutputStreamBase (Base class)
- * |-- OutputVDeviceBaseStream (Base class for vdevice streams)
- * | |-- OutputVDeviceNativeStream
- * | |-- ScheduledOutputStream
- **/
-
-#ifndef HAILO_VDEVICE_STREAM_HPP_
-#define HAILO_VDEVICE_STREAM_HPP_
-
-#include "stream_internal.hpp"
-#include "hailo/hailort.h"
-#include "vdevice_internal.hpp"
-#include "vdma_device.hpp"
-#include "vdma_stream.hpp"
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-
-class InputVDeviceBaseStream : public InputStreamBase {
-
-public:
- static Expected<std::unique_ptr<InputVDeviceBaseStream>> create(std::vector<std::reference_wrapper<VdmaInputStream>> &&low_level_streams,
- const LayerInfo &edge_layer, const scheduler_ng_handle_t &network_group_handle,
- EventPtr network_group_activated_event, NetworkGroupSchedulerWeakPtr network_group_scheduler);
-
- InputVDeviceBaseStream(InputVDeviceBaseStream &&other) :
- InputStreamBase(std::move(other)),
- m_streams(std::move(other.m_streams)),
- m_is_stream_activated(std::exchange(other.m_is_stream_activated, false)),
- m_next_transfer_stream_index(other.m_next_transfer_stream_index),
- m_acc_frames(other.m_acc_frames)
- {}
-
- virtual ~InputVDeviceBaseStream();
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override;
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-
- virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
- virtual bool is_scheduled() override = 0;
- virtual hailo_status abort() override = 0;
- virtual hailo_status clear_abort() override = 0;
-
- virtual void notify_all()
- {
- // Overriden in scheduled_stream
- return;
- }
-
-protected:
- virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override
- {
- return sync_write_raw_buffer(buffer, []() { return false; });
- }
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer, const std::function<bool()> &should_cancel) = 0;
-
- explicit InputVDeviceBaseStream(
- std::vector<std::reference_wrapper<VdmaInputStream>> &&streams,
- EventPtr &&network_group_activated_event,
- const LayerInfo &layer_info,
- hailo_status &status) :
- InputStreamBase(layer_info, streams[0].get().get_interface(), std::move(network_group_activated_event), status),
- m_streams(std::move(streams)),
- m_is_stream_activated(false),
- m_next_transfer_stream_index(0),
- m_acc_frames(0)
- {}
-
- std::vector<std::reference_wrapper<VdmaInputStream>> m_streams;
- bool m_is_stream_activated;
- uint32_t m_next_transfer_stream_index;
- uint32_t m_acc_frames;
-
-private:
- friend class VDeviceInputStreamMultiplexerWrapper;
-
- virtual hailo_status flush() override;
-};
-
-class OutputVDeviceBaseStream : public OutputStreamBase {
-public:
- OutputVDeviceBaseStream(OutputVDeviceBaseStream &&other) :
- OutputStreamBase(std::move(other)),
- m_streams(std::move(other.m_streams)),
- m_is_stream_activated(std::exchange(other.m_is_stream_activated, false)),
- m_next_transfer_stream_index(other.m_next_transfer_stream_index),
- m_acc_frames(other.m_acc_frames)
- {}
-
- virtual ~OutputVDeviceBaseStream();
-
- static Expected<std::unique_ptr<OutputVDeviceBaseStream>> create(std::vector<std::reference_wrapper<VdmaOutputStream>> &&low_level_streams,
- const LayerInfo &edge_layer, const scheduler_ng_handle_t &network_group_handle,
- EventPtr network_group_activated_event, NetworkGroupSchedulerWeakPtr network_group_scheduler);
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override;
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
- virtual hailo_status abort() override = 0;
- virtual hailo_status clear_abort() override = 0;
- virtual bool is_scheduled() override = 0;
-
- virtual hailo_status register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback) override
- {
- for (auto &stream : m_streams) {
- auto status = stream.get().register_for_d2h_interrupts(callback);
- CHECK_SUCCESS(status);
- }
- return HAILO_SUCCESS;
- }
-
-protected:
- virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer) override;
-
- explicit OutputVDeviceBaseStream(
- std::vector<std::reference_wrapper<VdmaOutputStream>> &&streams,
- const LayerInfo &layer_info,
- EventPtr &&network_group_activated_event,
- hailo_status &status) :
- OutputStreamBase(layer_info, std::move(network_group_activated_event), status),
- m_streams(std::move(streams)),
- m_is_stream_activated(false),
- m_next_transfer_stream_index(0),
- m_acc_frames(0)
- {}
-
- virtual hailo_status read_all(MemoryView &buffer) override;
-
- std::vector<std::reference_wrapper<VdmaOutputStream>> m_streams;
- bool m_is_stream_activated;
- uint32_t m_next_transfer_stream_index;
- uint32_t m_acc_frames;
-
-private:
- friend class VDeviceOutputStreamMultiplexerWrapper;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDEVICE_STREAM_HPP_ */
+++ /dev/null
-#include "vdevice_stream_multiplexer_wrapper.hpp"
-
-namespace hailort
-{
-
-const hailo_stream_info_t &VDeviceInputStreamMultiplexerWrapper::get_info() const
-{
- return m_vdevice_input_stream->get_info();
-}
-
-const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceInputStreamMultiplexerWrapper::get_nn_stream_config()
-{
- return m_vdevice_input_stream->get_nn_stream_config();
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size)
-{
- return m_vdevice_input_stream->activate_stream(dynamic_batch_size);
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::deactivate_stream()
-{
- return m_vdevice_input_stream->deactivate_stream();
-}
-
-hailo_stream_interface_t VDeviceInputStreamMultiplexerWrapper::get_interface() const
-{
- return m_vdevice_input_stream->get_interface();
-}
-
-std::chrono::milliseconds VDeviceInputStreamMultiplexerWrapper::get_timeout() const
-{
- return m_vdevice_input_stream->get_timeout();
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::abort()
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->disable_network_group(m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- *m_is_aborted = true;
- m_vdevice_input_stream->notify_all();
-
- // TODO: HRT-7638
- status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__ABORT, m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
- }
-
- auto status = m_vdevice_input_stream->abort();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::clear_abort()
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->enable_network_group(m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- *m_is_aborted = false;
-
- status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- m_vdevice_input_stream->notify_all();
-
- return HAILO_SUCCESS;
- }
-
- auto status = m_vdevice_input_stream->clear_abort();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-bool VDeviceInputStreamMultiplexerWrapper::is_scheduled()
-{
- return m_vdevice_input_stream->is_scheduled();
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::send_pending_buffer(size_t device_index)
-{
- return m_vdevice_input_stream->send_pending_buffer(device_index);
-}
-
-Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_buffer_frames_size() const
-{
- return m_vdevice_input_stream->get_buffer_frames_size();
-}
-
-Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_pending_frames_count() const
-{
- return m_vdevice_input_stream->get_pending_frames_count();
-}
-
-Expected<size_t> VDeviceInputStreamMultiplexerWrapper::sync_write_raw_buffer(const MemoryView &buffer)
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->wait_for_write(m_network_group_multiplexer_handle);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return make_unexpected(status);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- auto exp = m_vdevice_input_stream->sync_write_raw_buffer(buffer, [this]() { return m_is_aborted->load(); });
- if (HAILO_STREAM_ABORTED_BY_USER == exp.status()) {
- return make_unexpected(exp.status());
- }
- CHECK_EXPECTED(exp);
-
- if (is_scheduled()) {
- auto status = m_multiplexer->signal_write_finish(m_network_group_multiplexer_handle);
- CHECK_SUCCESS_AS_EXPECTED(status);
- }
-
- return exp;
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
-{
- ASSERT(NULL != buffer);
-
- return sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, size)).status();
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
-{
- return m_vdevice_input_stream->set_timeout(timeout);
-}
-
-hailo_status VDeviceInputStreamMultiplexerWrapper::flush()
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__FLUSH, m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
- }
-
- return m_vdevice_input_stream->flush();
-}
-
-Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::create(std::shared_ptr<InputVDeviceBaseStream> vdevice_input_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- std::unique_ptr<VDeviceInputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceInputStreamMultiplexerWrapper(vdevice_input_stream, network_name, multiplexer,
- network_group_scheduler_handle, network_group_multiplexer_handle, status));
- CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return wrapper;
-}
-
-Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::clone(multiplexer_ng_handle_t network_group_multiplexer_handle)
-{
- auto wrapper = create(m_vdevice_input_stream, m_network_name, m_multiplexer, m_network_group_scheduler_handle, network_group_multiplexer_handle);
- CHECK_EXPECTED(wrapper);
-
- return wrapper;
-}
-
-VDeviceInputStreamMultiplexerWrapper::VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<InputVDeviceBaseStream> &vdevice_input_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle, hailo_status &status) :
- InputStreamBase(vdevice_input_stream->get_info(),
- vdevice_input_stream->m_nn_stream_config, vdevice_input_stream->get_network_group_activated_event()),
- m_vdevice_input_stream(vdevice_input_stream),
- m_multiplexer(multiplexer),
- m_network_group_scheduler_handle(network_group_scheduler_handle),
- m_network_group_multiplexer_handle(network_group_multiplexer_handle),
- m_network_name(network_name),
- m_is_aborted()
-{
- m_is_aborted = make_unique_nothrow<std::atomic_bool>(false);
- if (nullptr == m_is_aborted) {
- status = HAILO_OUT_OF_HOST_MEMORY;
- LOGGER__ERROR("Failed to allocate memory! status = {}", status);
- return;
- }
- status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__FLUSH, [this]
- {
- return m_vdevice_input_stream->flush();
- });
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
- return;
- }
-
- status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__ABORT, [this]
- {
- return m_vdevice_input_stream->abort();
- });
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
- return;
- }
-
- status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
- {
- return m_vdevice_input_stream->clear_abort();
- });
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
- return;
- }
-}
-
-const hailo_stream_info_t &VDeviceOutputStreamMultiplexerWrapper::get_info() const
-{
- return m_vdevice_output_stream->get_info();
-}
-
-const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceOutputStreamMultiplexerWrapper::get_nn_stream_config()
-{
- return m_vdevice_output_stream->get_nn_stream_config();
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size)
-{
- return m_vdevice_output_stream->activate_stream(dynamic_batch_size);
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::deactivate_stream()
-{
- return m_vdevice_output_stream->deactivate_stream();
-}
-
-hailo_stream_interface_t VDeviceOutputStreamMultiplexerWrapper::get_interface() const
-{
- return m_vdevice_output_stream->get_interface();
-}
-
-std::chrono::milliseconds VDeviceOutputStreamMultiplexerWrapper::get_timeout() const
-{
- return m_vdevice_output_stream->get_timeout();
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::abort()
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->disable_network_group(m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- // TODO: HRT-7638
- status = m_multiplexer->run_once_for_stream(name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
- }
-
- auto status = m_vdevice_output_stream->abort();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::clear_abort()
-{
- if (is_scheduled()) {
- auto status = m_multiplexer->enable_network_group(m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- status = m_multiplexer->run_once_for_stream(name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, m_network_group_multiplexer_handle);
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
- }
-
- auto status = m_vdevice_output_stream->clear_abort();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-bool VDeviceOutputStreamMultiplexerWrapper::is_scheduled()
-{
- return m_vdevice_output_stream->is_scheduled();
-}
-
-Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_buffer_frames_size() const
-{
- return m_vdevice_output_stream->get_buffer_frames_size();
-}
-Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_pending_frames_count() const
-{
- return m_vdevice_output_stream->get_pending_frames_count();
-}
-
-Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::sync_read_raw_buffer(MemoryView &buffer)
-{
- return m_vdevice_output_stream->sync_read_raw_buffer(buffer);
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::read_all(MemoryView &buffer)
-{
- return m_vdevice_output_stream->read_all(buffer);
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::read(MemoryView buffer)
-{
- uint32_t frames_to_drain_count = 0;
- if (is_scheduled()) {
- auto expected_drain_count = m_multiplexer->wait_for_read(m_network_group_multiplexer_handle, name(),
- m_vdevice_output_stream->get_timeout());
- if (HAILO_STREAM_ABORTED_BY_USER == expected_drain_count.status()) {
- return expected_drain_count.status();
- }
- CHECK_EXPECTED_AS_STATUS(expected_drain_count);
-
- frames_to_drain_count = expected_drain_count.release();
- }
-
- for (uint32_t i = 0; i < frames_to_drain_count; i++) {
- auto status = m_vdevice_output_stream->read(buffer);
- if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
- return status;
- }
- CHECK_SUCCESS(status);
- }
-
- auto status = m_vdevice_output_stream->read(buffer);
- if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
- return status;
- }
- CHECK_SUCCESS(status);
-
- if (is_scheduled()) {
- status = m_multiplexer->signal_read_finish(m_network_group_multiplexer_handle);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return status;
- }
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
-{
- return m_vdevice_output_stream->set_timeout(timeout);
-}
-
-Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::create(std::shared_ptr<OutputVDeviceBaseStream> vdevice_output_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceOutputStreamMultiplexerWrapper(vdevice_output_stream, network_name, multiplexer,
- network_group_scheduler_handle, network_group_multiplexer_handle, status));
- CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
-
- return wrapper;
-}
-
-Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::clone(scheduler_ng_handle_t network_group_multiplexer_handle)
-{
- auto wrapper = create(m_vdevice_output_stream, m_network_name, m_multiplexer, m_network_group_scheduler_handle, network_group_multiplexer_handle);
- CHECK_EXPECTED(wrapper);
-
- return wrapper;
-}
-
-VDeviceOutputStreamMultiplexerWrapper::VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<OutputVDeviceBaseStream> &vdevice_output_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle, hailo_status &status) :
- OutputStreamBase(vdevice_output_stream->get_layer_info(), vdevice_output_stream->get_info(),
- vdevice_output_stream->m_nn_stream_config, vdevice_output_stream->get_network_group_activated_event()),
- m_vdevice_output_stream(vdevice_output_stream),
- m_multiplexer(multiplexer),
- m_network_group_scheduler_handle(network_group_scheduler_handle),
- m_network_group_multiplexer_handle(network_group_multiplexer_handle),
- m_network_name(network_name)
-{
- status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, [this]
- {
- return m_vdevice_output_stream->abort();
- });
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
- return;
- }
-
- status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
- {
- return m_vdevice_output_stream->clear_abort();
- });
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
- return;
- }
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_stream_multiplexer_wrapper.hpp
- * @brief Wrapper classes for VDeviceInputStream and VDeviceOutputStream
- **/
-
-#ifndef HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_
-#define HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_
-
-#include "vdevice_stream.hpp"
-#include "stream_internal.hpp"
-#include "hailo/expected.hpp"
-#include "pipeline_multiplexer.hpp"
-
-namespace hailort
-{
-
-enum input_run_once_handle_t {
- INPUT_RUN_ONCE_HANDLE__FLUSH,
- INPUT_RUN_ONCE_HANDLE__ABORT,
- INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT
-};
-
-enum output_run_once_handle_t {
- OUTPUT_RUN_ONCE_HANDLE__ABORT,
- OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT
-};
-
-class VDeviceInputStreamMultiplexerWrapper : public InputStreamBase {
-public:
- virtual ~VDeviceInputStreamMultiplexerWrapper() = default;
- VDeviceInputStreamMultiplexerWrapper(const VDeviceInputStreamMultiplexerWrapper &other) = delete;
- VDeviceInputStreamMultiplexerWrapper &operator=(const VDeviceInputStreamMultiplexerWrapper &other) = delete;
- VDeviceInputStreamMultiplexerWrapper &operator=(VDeviceInputStreamMultiplexerWrapper &&other) = delete;
- VDeviceInputStreamMultiplexerWrapper(VDeviceInputStreamMultiplexerWrapper &&other) = default;
-
- static Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> create(std::shared_ptr<InputVDeviceBaseStream> vdevice_input_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle = 0);
- Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> clone(multiplexer_ng_handle_t network_group_multiplexer_handle);
-
- virtual const hailo_stream_info_t &get_info() const override;
- virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override;
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override;
-
- virtual hailo_status send_pending_buffer(size_t device_index = 0) override;
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
-
-protected:
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
- virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
-
-private:
- VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<InputVDeviceBaseStream> &vdevice_input_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle, hailo_status &status);
-
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
- virtual hailo_status flush() override;
-
- std::shared_ptr<InputVDeviceBaseStream> m_vdevice_input_stream;
- std::shared_ptr<PipelineMultiplexer> m_multiplexer;
- scheduler_ng_handle_t m_network_group_scheduler_handle;
- multiplexer_ng_handle_t m_network_group_multiplexer_handle;
- std::string m_network_name;
-
- std::unique_ptr<std::atomic_bool> m_is_aborted;
-};
-
-class VDeviceOutputStreamMultiplexerWrapper : public OutputStreamBase {
-public:
- virtual ~VDeviceOutputStreamMultiplexerWrapper() noexcept = default;
- VDeviceOutputStreamMultiplexerWrapper(const VDeviceOutputStreamMultiplexerWrapper &other) = delete;
- VDeviceOutputStreamMultiplexerWrapper &operator=(const VDeviceOutputStreamMultiplexerWrapper &other) = delete;
- VDeviceOutputStreamMultiplexerWrapper &operator=(VDeviceOutputStreamMultiplexerWrapper &&other) = delete;
- VDeviceOutputStreamMultiplexerWrapper(VDeviceOutputStreamMultiplexerWrapper &&other) = default;
-
- static Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> create(std::shared_ptr<OutputVDeviceBaseStream> vdevice_output_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle = 0);
- Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> clone(multiplexer_ng_handle_t network_group_multiplexer_handle);
-
- virtual const hailo_stream_info_t &get_info() const override;
- virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual hailo_stream_interface_t get_interface() const override;
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual bool is_scheduled() override;
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
-
- virtual hailo_status register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback) override
- {
- return m_vdevice_output_stream->register_for_d2h_interrupts(callback);
- }
-
-protected:
- virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer) override;
-
-private:
- VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<OutputVDeviceBaseStream> &vdevice_output_stream,
- std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_ng_handle_t network_group_scheduler_handle,
- multiplexer_ng_handle_t network_group_multiplexer_handle, hailo_status &status);
-
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
- virtual hailo_status read_all(MemoryView &buffer) override;
- virtual hailo_status read(MemoryView buffer) override;
-
- std::shared_ptr<OutputVDeviceBaseStream> m_vdevice_output_stream;
- std::shared_ptr<PipelineMultiplexer> m_multiplexer;
- scheduler_ng_handle_t m_network_group_scheduler_handle;
- multiplexer_ng_handle_t m_network_group_multiplexer_handle;
- std::string m_network_name;
- EventPtr m_read_event;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDEVICE_STREAM_MULTIPLEXER_WRAPPER_HPP_ */
--- /dev/null
+cmake_minimum_required(VERSION 3.0.0)
+
+# Source list for the vdma component: device front-ends, network-group config,
+# stream implementations, vdma channels and vdma memory management.
+# Paths are absolute so the list stays valid when consumed from the parent scope.
+set(SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_device.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_core_op.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_activated_core_op.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_manager.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_stream.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_stream_base.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_async_stream.cpp
+
+    # Device-specific transports (PCIe and integrated/SoC)
+    ${CMAKE_CURRENT_SOURCE_DIR}/pcie/pcie_device.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/integrated/integrated_device.cpp
+
+    # vdma channel hierarchy (see boundary_channel.hpp for the class diagram)
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/channel_state.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/channel_base.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/buffered_channel.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/boundary_channel.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/async_channel.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/channel/interrupts_dispatcher.cpp
+
+    # DMA-able buffer/descriptor abstractions
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/descriptor_list.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/vdma_buffer.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/dma_mapped_buffer.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/mapped_buffer_impl.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/mapped_buffer_factory.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/sg_buffer.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/memory/continuous_buffer.cpp
+)
+
+# Append to the global source list consumed by the parent CMakeLists.
+set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
--- /dev/null
+/**\r
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file async_channel.cpp\r
+ * @brief Implementation of the AsyncChannel class\r
+ **/\r
+\r
+#include "async_channel.hpp"\r
+#include "hailo/hailort.h"\r
+#include "hailo/hailort_common.hpp"\r
+\r
+namespace hailort\r
+{\r
+namespace vdma\r
+{\r
+\r
+// Factory for AsyncChannel. Allocates the channel with a no-throw allocator and
+// validates that construction (which reports errors via the trailing status
+// out-param) succeeded before returning the shared pointer.\r
+Expected<AsyncChannelPtr> AsyncChannel::create(vdma::ChannelId channel_id, Direction direction,\r
+    HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,\r
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr)\r
+{\r
+    hailo_status status = HAILO_UNINITIALIZED;\r
+    auto channel_ptr = make_shared_nothrow<vdma::AsyncChannel>(channel_id, direction, driver, descs_count,\r
+        desc_page_size, stream_name, latency_meter, transfers_per_axi_intr, status);\r
+    CHECK_NOT_NULL_AS_EXPECTED(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);\r
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating AsyncChannel");\r
+    return channel_ptr;\r
+}\r
+\r
+// Constructor: forwards everything to BoundaryChannel (Type::ASYNC).
+// Constructors here don't throw; the result is reported via 'status'.\r
+AsyncChannel::AsyncChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,\r
+    uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,\r
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,\r
+    hailo_status &status) :\r
+    BoundaryChannel(BoundaryChannel::Type::ASYNC, channel_id, direction, driver, descs_count, desc_page_size,\r
+        stream_name, latency_meter, transfers_per_axi_intr, status)\r
+{\r
+    // Check that base constructor was successful\r
+    if (HAILO_SUCCESS != status) {\r
+        LOGGER__ERROR("Failed building Vdma Channel base class");\r
+        return;\r
+    }\r
+\r
+    status = HAILO_SUCCESS;\r
+}\r
+\r
+// Queues an asynchronous zero-copy transfer on a pre-mapped buffer.
+// 'user_callback' is invoked (with 'opaque') when the transfer completes.
+// Returns HAILO_STREAM_ABORTED_BY_USER if the channel was aborted, and
+// HAILO_STREAM_NOT_ACTIVATED if the channel is not active.\r
+hailo_status AsyncChannel::transfer(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque)\r
+{\r
+    CHECK_NOT_NULL(buffer, HAILO_INVALID_ARGUMENT);\r
+    CHECK(0 != buffer->size(), HAILO_INVALID_ARGUMENT);\r
+\r
+    // State is checked and descriptors are programmed under the state lock\r
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());\r
+    if (m_state->m_is_aborted) {\r
+        LOGGER__INFO("Tried to write to aborted channel {}", m_channel_id);\r
+        return HAILO_STREAM_ABORTED_BY_USER;\r
+    }\r
+\r
+    hailo_status status = HAILO_UNINITIALIZED;\r
+    if (Direction::H2D == m_direction) {\r
+        status = transfer_h2d(buffer, user_callback, opaque);\r
+    } else {\r
+        status = transfer_d2h(buffer, user_callback, opaque);\r
+    }\r
+\r
+    if (HAILO_STREAM_NOT_ACTIVATED == status) {\r
+        // Expected during teardown - logged at INFO level, not as an error\r
+        LOGGER__INFO("Transfer failed because Channel {} is not activated", m_channel_id);\r
+        return HAILO_STREAM_NOT_ACTIVATED;\r
+    }\r
+    if (HAILO_SUCCESS != status) {\r
+        LOGGER__ERROR("Transfer failed for channel {} with status {}", m_channel_id, status);\r
+        return status;\r
+    }\r
+\r
+    return HAILO_SUCCESS;\r
+}\r
+\r
+// Notifies every pending transfer's callback with HAILO_STREAM_NOT_ACTIVATED,
+// then drops the channel's references to the user buffer/callback/opaque.
+// To be called on a deactivated channel (see BoundaryChannel docs).\r
+hailo_status AsyncChannel::cancel_pending_transfers()\r
+{\r
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());\r
+    for (auto &pending_buffer_info : m_state->m_pending_buffers) {\r
+        if (pending_buffer_info.on_transfer_done) {\r
+            pending_buffer_info.on_transfer_done(pending_buffer_info.buffer,\r
+                hailo_async_transfer_completion_info_t{HAILO_STREAM_NOT_ACTIVATED},\r
+                pending_buffer_info.opaque);\r
+            // Release our references to user buffer, callback and opaque\r
+            pending_buffer_info = PendingBuffer{};\r
+        } else {\r
+            LOGGER__WARNING("No transfer done callback found for transfer (channel {}); skipping", m_channel_id);\r
+        }\r
+    }\r
+\r
+    return HAILO_SUCCESS;\r
+}\r
+\r
+// Activation hook - nothing extra to do for async channels (descriptors are\r
+// programmed per-transfer in prepare_descriptors(), not up front).\r
+hailo_status AsyncChannel::complete_channel_activation(uint32_t /* transfer_size */, bool /* resume_pending_transfers */)\r
+{\r
+    return HAILO_SUCCESS;\r
+}\r
+\r
+hailo_status AsyncChannel::complete_channel_deactivation()\r
+{\r
+    // Note: We don't reset channel counters here as the resource manager will signal pending transfers\r
+    // (i.e. transfers in m_pending_buffers) via cancel_pending_async_transfers.\r
+    // The counters are reset in the channel activation\r
+    return HAILO_SUCCESS;\r
+}\r
+\r
+// The following overrides belong to the buffered (sync) API and are not\r
+// supported on async channels; they exist only to satisfy the\r
+// BoundaryChannel interface (see HRT-9105 in boundary_channel.hpp).\r
+hailo_status AsyncChannel::transfer(void */* buf */, size_t /* count */)\r
+{\r
+    return HAILO_NOT_IMPLEMENTED;\r
+}\r
+\r
+hailo_status AsyncChannel::write_buffer(const MemoryView &/* buffer */, std::chrono::milliseconds /* timeout */,\r
+    const std::function<bool()> &/* should_cancel */)\r
+{\r
+    return HAILO_NOT_IMPLEMENTED;\r
+}\r
+\r
+hailo_status AsyncChannel::send_pending_buffer()\r
+{\r
+    return HAILO_NOT_IMPLEMENTED;\r
+}\r
+\r
+void AsyncChannel::notify_all()\r
+{}\r
+\r
+Expected<BoundaryChannel::BufferState> AsyncChannel::get_buffer_state()\r
+{\r
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
+}\r
+\r
+Expected<size_t> AsyncChannel::get_h2d_pending_frames_count()\r
+{\r
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
+}\r
+\r
+Expected<size_t> AsyncChannel::get_d2h_pending_descs_count()\r
+{\r
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
+}\r
+\r
+// D2H transfer: host interrupt on the last descriptor of every transfer; the\r
+// device (FW) interrupt fires only once per m_transfers_per_axi_intr transfers\r
+// (tracked via m_accumulated_transfers). Caller holds the state lock.\r
+hailo_status AsyncChannel::transfer_d2h(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque)\r
+{\r
+    InterruptsDomain first_desc_interrupts_domain = InterruptsDomain::NONE;\r
+    // Provide FW interrupt only in the end of the last transfer in the batch\r
+    InterruptsDomain last_desc_interrupts_domain = (m_state->m_accumulated_transfers + 1 == m_transfers_per_axi_intr) ?\r
+        InterruptsDomain::BOTH : InterruptsDomain::HOST;\r
+\r
+    const auto status = prepare_descriptors(buffer, user_callback, opaque, first_desc_interrupts_domain, last_desc_interrupts_domain);\r
+    CHECK_SUCCESS(status);\r
+\r
+    // Wraps around every m_transfers_per_axi_intr transfers\r
+    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;\r
+\r
+    return HAILO_SUCCESS;\r
+}\r
+\r
+// H2D transfer: host interrupt on the last descriptor; an extra interrupt on\r
+// the first descriptor is requested only when latency is being measured.\r
+// Caller holds the state lock.\r
+hailo_status AsyncChannel::transfer_h2d(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque)\r
+{\r
+    // For h2d, only the host needs to get transfer done interrupts\r
+    InterruptsDomain last_desc_interrupts_domain = InterruptsDomain::HOST;\r
+    // If we measure latency, we need interrupt on the first descriptor\r
+    InterruptsDomain first_desc_interrupts_domain = (m_latency_meter != nullptr) ?\r
+        InterruptsDomain::HOST : InterruptsDomain::NONE;\r
+\r
+    return prepare_descriptors(buffer, user_callback, opaque, first_desc_interrupts_domain, last_desc_interrupts_domain);\r
+}\r
+\r
+// Programs the descriptor list for one transfer and records it as pending.\r
+// Fails with HAILO_OUT_OF_DESCRIPTORS if the circular descriptor buffer has no\r
+// room (no blocking). On success the pending buffer carries a wrapper callback\r
+// that first invokes the user's callback and then the channel-level\r
+// m_transfer_done_callback. Caller holds the state lock.\r
+hailo_status AsyncChannel::prepare_descriptors(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback,\r
+    void *opaque, InterruptsDomain first_desc_interrupts_domain, InterruptsDomain last_desc_interrupts_domain)\r
+{\r
+    const auto desired_desc_num = m_desc_list->descriptors_in_buffer(buffer->size());\r
+    CHECK(desired_desc_num <= MAX_DESCS_COUNT, HAILO_INTERNAL_FAILURE);\r
+    const uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);\r
+\r
+    // Free descriptors = those between the processed tail and the available head\r
+    int num_available = get_num_available();\r
+    int num_processed = CB_TAIL(m_state->m_descs);\r
+    int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);\r
+    if (num_free < desc_num) {\r
+        // TODO: do we want to block here?\r
+        return HAILO_OUT_OF_DESCRIPTORS;\r
+    }\r
+\r
+    // Bind the user's buffer pages to the descriptors starting at num_available\r
+    const auto status = m_desc_list->configure_to_use_buffer(*buffer, m_channel_id, num_available);\r
+    CHECK_SUCCESS(status);\r
+    if (nullptr != m_latency_meter) {\r
+        // Program first descriptor\r
+        m_desc_list->program_single_descriptor((*m_desc_list)[num_available], m_desc_list->desc_page_size(),\r
+            first_desc_interrupts_domain);\r
+    }\r
+    auto actual_desc_count = m_desc_list->program_last_descriptor(buffer->size(), last_desc_interrupts_domain,\r
+        num_available, true);\r
+    CHECK_EXPECTED_AS_STATUS(actual_desc_count, "Failed to program desc_list for channel {}", m_channel_id);\r
+    assert (actual_desc_count.value() == desc_num);\r
+    // Index of the transfer's last descriptor (circular-buffer wrap-around)\r
+    int last_desc_avail = ((num_available + desc_num - 1) & m_state->m_descs.size_mask);\r
+\r
+    const auto callback = [this, user_callback](std::shared_ptr<DmaMappedBuffer> buffer, const hailo_async_transfer_completion_info_t &status, void *opaque) {\r
+        user_callback(buffer, status, opaque);\r
+\r
+        // opaque is only for the user callback\r
+        static constexpr void *NO_CONTEXT = nullptr;\r
+        m_transfer_done_callback(buffer, status, NO_CONTEXT);\r
+    };\r
+\r
+    m_state->add_pending_buffer(num_available, last_desc_avail, m_direction, callback, buffer, opaque);\r
+    // Publish the newly programmed descriptors to HW\r
+    return inc_num_available(desc_num);\r
+}\r
+\r
+} /* namespace vdma */\r
+} /* namespace hailort */\r
--- /dev/null
+/**\r
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file async_channel.hpp\r
+ * @brief AsyncChannel - Implements the BoundaryChannel interface, allowing for asyc send/recv and zero copy io\r
+ **/\r
+\r
+#ifndef _HAILO_ASYNC_CHANNEL_HPP_\r
+#define _HAILO_ASYNC_CHANNEL_HPP_\r
+\r
+#include "hailo/hailort.h"\r
+\r
+#include "vdma/channel/boundary_channel.hpp"\r
+#include "vdma/channel/channel_state.hpp"\r
+\r
+#include <functional>\r
+\r
+\r
+namespace hailort\r
+{\r
+namespace vdma\r
+{\r
+\r
+class AsyncChannel;\r
+using AsyncChannelPtr = std::shared_ptr<AsyncChannel>;\r
+\r
+// Async/zero-copy implementation of BoundaryChannel: the user supplies\r
+// pre-mapped DMA buffers and a completion callback per transfer. The\r
+// buffered (sync) parts of the BoundaryChannel interface return\r
+// HAILO_NOT_IMPLEMENTED here.\r
+class AsyncChannel : public BoundaryChannel\r
+{\r
+public:\r
+    // Prefer this factory over the constructor (reports allocation failures)\r
+    static Expected<AsyncChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,\r
+        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,\r
+        uint16_t transfers_per_axi_intr = 1);\r
+\r
+    // 'status' receives the construction result (constructors don't throw)\r
+    AsyncChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,\r
+        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,\r
+        hailo_status &status);\r
+    AsyncChannel(AsyncChannel &&) = delete;\r
+    AsyncChannel(const AsyncChannel &) = delete;\r
+    AsyncChannel &operator=(AsyncChannel &&) = delete;\r
+    AsyncChannel &operator=(const AsyncChannel &) = delete;\r
+    virtual ~AsyncChannel() = default;\r
+\r
+    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) override;\r
+    virtual hailo_status complete_channel_deactivation() override;\r
+\r
+    // Async API: queue a transfer on a pre-mapped buffer; user_callback fires on completion\r
+    virtual hailo_status transfer(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque) override;\r
+    virtual hailo_status cancel_pending_transfers() override;\r
+\r
+    // Buffered-channel API below - not supported on async channels (HAILO_NOT_IMPLEMENTED)\r
+    virtual hailo_status transfer(void *buf, size_t count) override;\r
+    // TODO: don't want\r
+    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,\r
+        const std::function<bool()> &should_cancel) override;\r
+    // TODO: don't want\r
+    virtual hailo_status send_pending_buffer() override;\r
+    // TODO: don't want\r
+    virtual void notify_all() override;\r
+\r
+    // TODO: don't want\r
+    virtual Expected<BoundaryChannel::BufferState> get_buffer_state() override;\r
+    // TODO: don't want\r
+    virtual Expected<size_t> get_h2d_pending_frames_count() override;\r
+    // TODO: don't want\r
+    virtual Expected<size_t> get_d2h_pending_descs_count() override;\r
+\r
+private:\r
+    // Direction-specific interrupt-domain selection; both end in prepare_descriptors()\r
+    hailo_status transfer_d2h(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque);\r
+    hailo_status transfer_h2d(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque);\r
+    hailo_status prepare_descriptors(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback,\r
+        void *opaque, InterruptsDomain first_desc_interrupts_domain, InterruptsDomain last_desc_interrupts_domain);\r
+};\r
+\r
+} /* namespace vdma */\r
+} /* namespace hailort */\r
+\r
+#endif /* _HAILO_ASYNC_CHANNEL_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file boundary_channel.cpp
+ * @brief BoundaryChannel - Base class functionality
+ **/
+
+#include "hailo/hailort_common.hpp"
+
+#include "common/os_utils.hpp"
+
+#include "vdma/channel/boundary_channel.hpp"
+#include "vdma/channel/buffered_channel.hpp"
+#include "vdma/channel/async_channel.hpp"
+
+#include <list>
+#include <chrono>
+#include <thread>
+#include <iostream>
+
+
+namespace hailort {
+namespace vdma {
+
+
+// Factory dispatching on channel Type: creates either a BufferedChannel or an
+// AsyncChannel and upcasts it to the BoundaryChannel interface.
+Expected<BoundaryChannelPtr> BoundaryChannel::create(vdma::ChannelId channel_id, Direction direction,
+    HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, Type type)
+{
+    switch (type)
+    {
+    case Type::BUFFERED:
+    {
+        auto buffered_channel = BufferedChannel::create(channel_id, direction, driver, descs_count, desc_page_size,
+            stream_name, latency_meter, transfers_per_axi_intr);
+        CHECK_EXPECTED(buffered_channel);
+
+        // Upcasting
+        return std::static_pointer_cast<BoundaryChannel>(buffered_channel.value());
+    }
+
+    case Type::ASYNC:
+    {
+        auto async_channel = AsyncChannel::create(channel_id, direction, driver, descs_count, desc_page_size,
+            stream_name, latency_meter, transfers_per_axi_intr);
+        CHECK_EXPECTED(async_channel);
+
+        // Upcasting
+        return std::static_pointer_cast<BoundaryChannel>(async_channel.value());
+    }
+    }
+
+    // Shouldn't get here (all enum values handled above)
+    return make_unexpected(HAILO_INVALID_ARGUMENT);
+}
+
+// Constructor: validates direction (must be unidirectional) and
+// transfers_per_axi_intr (must be > 0). Errors are reported via 'status'.
+// Also installs the default transfer-done callback, which simply forwards a
+// single-frame notification to the user's interrupt callback.
+BoundaryChannel::BoundaryChannel(Type type, vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
+    uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status) :
+    ChannelBase(channel_id, direction, driver, descs_count, desc_page_size, stream_name, latency_meter, status),
+    m_type(type),
+    m_user_interrupt_callback(ignore_processing_complete),
+    m_transfers_per_axi_intr(transfers_per_axi_intr)
+{
+    // Check that base constructor was successful
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed building vdma channel base class");
+        return;
+    }
+
+    if (Direction::BOTH == direction) {
+        LOGGER__ERROR("Boundary channels must be unidirectional");
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    if (m_transfers_per_axi_intr == 0) {
+        LOGGER__ERROR("Invalid transfers per axi interrupt");
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    // Default per-transfer completion hook: report one processed frame
+    m_transfer_done_callback = [this](std::shared_ptr<DmaMappedBuffer>, const hailo_async_transfer_completion_info_t &, void *) {
+        m_user_interrupt_callback(1);
+    };
+}
+
+// Clears the HW descriptors still referenced by pending buffers (the last
+// descriptor of each transfer, plus the latency-measurement descriptor when a
+// latency meter is attached). Called on activation before counters are reset.
+void BoundaryChannel::clear_pending_buffers_descriptors()
+{
+    for (const auto &pending_buffer : m_state->m_pending_buffers) {
+        const auto last_desc_index = pending_buffer.last_desc;
+
+        // Clear relevant descriptors from previous transfer
+        if (nullptr != m_latency_meter) {
+            const auto latency_desc_index = pending_buffer.latency_measure_desc;
+            m_desc_list->clear_descriptor(latency_desc_index);
+        }
+        m_desc_list->clear_descriptor(last_desc_index);
+    }
+}
+
+// Interrupt-handling path: given the HW's processed-descriptor index, completes
+// every pending transfer whose last descriptor has been processed - clears its
+// descriptors, advances the SW tail, and invokes its transfer-done callback.
+// Waiters on the transfer condvar are notified if anything completed.
+hailo_status BoundaryChannel::trigger_channel_completion(uint16_t hw_num_processed)
+{
+    size_t processed_no = 0;
+
+    {
+        // NOTE: right now, we can retake the 'completion' descriptor for a new transfer before handling the interrupt.
+        // we should have our own pointers indicating what's free instead of reading from HW.
+        // TODO: consider calculating the last descriptor using the src_desc_avail and src_desc_proc instead of using
+        // status?
+        // TODO: we might free a pending buffer which we didn't get an interrupt for yet. we should still handle this
+        // situation correctly.
+
+        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+        // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if m_desc.size < 0x10000
+        // (the maximum desc size), the actual hw_num_processed is a number between 1 and m_descs.size. Therefore the
+        // value can be m_descs.size, in this case we change it to zero.
+        hw_num_processed = static_cast<uint16_t>(hw_num_processed & m_state->m_descs.size_mask);
+
+        if (m_state->m_is_aborted) {
+            return HAILO_STREAM_ABORTED_BY_USER;
+        }
+
+        if (!m_state->m_is_channel_activated) {
+            return HAILO_STREAM_NOT_ACTIVATED;
+        }
+
+        if (m_latency_meter != nullptr) {
+            // The latency meter gets an updated hw_num_processed via a call to vdma_interrupts_read_timestamps
+            // (the desc index of the last measured timestamp returned from that ioctl). Since update_latency_meter
+            // processed m_pending_buffers based on this hw_num_processed, and this function (i.e.
+            // trigger_channel_completion) also processes m_pending_buffers based on the value of hw_num_processed,
+            // we want the two to be the same. Hence, we'll use the more up to date num_processed returned by
+            // update_latency_meter.
+            // TODO: fix update_latency_meter flow (HRT-10284)
+            auto latency_meter_hw_num_processed = update_latency_meter();
+            CHECK_EXPECTED_AS_STATUS(latency_meter_hw_num_processed);
+            hw_num_processed = latency_meter_hw_num_processed.value();
+        }
+
+        const auto last_num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
+
+        // Calculate pending_buffers_count before iteration, because the iteration removes done transfers
+        const auto pending_buffers_count = m_state->m_pending_buffers.size();
+        for (size_t i = 0; i < pending_buffers_count; i++) {
+            auto &last_pending_buffer_info = m_state->m_pending_buffers.front();
+            const auto last_desc_index = static_cast<uint16_t>(last_pending_buffer_info.last_desc);
+            // Transfer is complete if its last descriptor is in [last_num_processed, hw_num_processed) or
+            // the buffer is empty (hw_num_processed == get_num_available())
+            const bool is_complete = is_desc_between(last_num_processed, hw_num_processed, last_desc_index) ||
+                (hw_num_processed == get_num_available());
+
+    #ifndef NDEBUG
+            // Debug-only sanity check: the HW status byte of the last descriptor
+            // must not carry the error bit, and a complete transfer must be
+            // marked done (bit 0 set).
+            static constexpr auto STATUS_MASK = 0xFF;
+            static constexpr auto ERROR_BIT = 1;
+            const auto status = (*m_desc_list)[last_desc_index].RemainingPageSize_Status & STATUS_MASK;
+            CHECK(!is_bit_set(status, ERROR_BIT), HAILO_INTERNAL_FAILURE,
+                "Error while processing descriptor {} of DMA {} on board {}.",
+                last_desc_index, m_channel_id, m_driver.dev_path());
+
+            // status is read after hw_num_processed, so we want is_complete -> (status == 1).
+            assert(!is_complete || ((status & 0x1) == 1));
+    #endif
+
+            // Pending buffers complete in FIFO order; stop at the first incomplete one
+            if (!is_complete) {
+                break;
+            }
+
+            // Clear relevant descriptors from previous transfer
+            if (nullptr != m_latency_meter) {
+                const auto latency_desc_index = last_pending_buffer_info.latency_measure_desc;
+                m_desc_list->clear_descriptor(latency_desc_index);
+            }
+            m_desc_list->clear_descriptor(last_desc_index);
+
+            _CB_SET(m_state->m_descs.tail, (last_pending_buffer_info.last_desc + 1) & m_state->m_descs.size_mask);
+            last_pending_buffer_info.on_transfer_done(last_pending_buffer_info.buffer,
+                hailo_async_transfer_completion_info_t{HAILO_SUCCESS}, last_pending_buffer_info.opaque);
+            processed_no++;
+            m_state->m_pending_buffers.pop_front();
+        }
+    }
+
+    // Wake threads blocked in wait()/flush() now that descriptors were freed
+    if (0 < processed_no) {
+        m_state->transfer_buffer_cv().notify_all();
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Replaces the user interrupt callback (invoked per completed frame) under the
+// state lock.
+hailo_status BoundaryChannel::register_interrupt_callback(const ProcessingCompleteCallback &callback)
+{
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+    m_user_interrupt_callback = callback;
+    return HAILO_SUCCESS;
+}
+
+// Builds the host-buffer descriptor handed to the FW for this channel.
+CONTROL_PROTOCOL__host_buffer_info_t BoundaryChannel::get_boundary_buffer_info(uint32_t transfer_size)
+{
+    // Boundary channels always have scatter gather buffers
+    return VdmaBuffer::get_host_buffer_info(VdmaBuffer::Type::SCATTER_GATHER, m_desc_list->dma_address(),
+        m_desc_list->desc_page_size(), m_desc_list->count(), transfer_size);
+}
+
+// Marks the channel aborted and wakes all waiters so blocked wait()/flush()
+// calls return HAILO_STREAM_ABORTED_BY_USER.
+hailo_status BoundaryChannel::abort()
+{
+    {
+        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+        m_state->m_is_aborted = true;
+    }
+
+    // Notify outside the lock to let woken waiters re-acquire it immediately
+    m_state->transfer_buffer_cv().notify_all();
+
+    return HAILO_SUCCESS;
+}
+
+// Clears the aborted flag, allowing transfers again.
+hailo_status BoundaryChannel::clear_abort()
+{
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+    m_state->m_is_aborted = false;
+
+    return HAILO_SUCCESS;
+}
+
+// Called after the FW activated the channel. Clears leftover descriptors from
+// pending buffers, resets SW counters, then runs the subclass activation hook;
+// the activated flag is rolled back if the hook fails.
+hailo_status BoundaryChannel::activate(uint32_t transfer_size, bool resume_pending_transfers)
+{
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+    CHECK(!m_state->m_is_channel_activated, HAILO_INTERNAL_FAILURE,
+        "Vdma channel {} is already activated", m_channel_id);
+    m_state->m_is_channel_activated = true;
+    clear_pending_buffers_descriptors();
+    m_state->reset_counters();
+
+    auto status = complete_channel_activation(transfer_size, resume_pending_transfers);
+    if (HAILO_SUCCESS != status) {
+        // Roll back so a failed activation leaves the channel deactivated
+        m_state->m_is_channel_activated = false;
+        return status;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Called before the FW deactivates the channel. Drops the user interrupt
+// callback (to break potential reference cycles back to this channel) and runs
+// the subclass deactivation hook.
+hailo_status BoundaryChannel::deactivate()
+{
+    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+    CHECK(m_state->m_is_channel_activated, HAILO_INTERNAL_FAILURE,
+        "Vdma channel {} is not activated", m_channel_id);
+    m_state->m_is_channel_activated = false;
+
+    // Reset the user callback, so as not to keep objects provided by the user alive (they may lead to a chain of refs
+    // back to this channel causing it to be leaked).
+    // Note: PendingBuffers held by m_pending_buffers may still hold copies of the current m_transfer_done_callback,
+    // which in turn holds a reference to *this. Since we stop the m_wait_interrupts_thread there's no risk that
+    // these callbacks will be called and we don't need to reset this callback.
+    m_user_interrupt_callback = ignore_processing_complete;
+
+    auto status = complete_channel_deactivation();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Returns the channel implementation kind (BUFFERED or ASYNC).
+BoundaryChannel::Type BoundaryChannel::type() const
+{
+    return m_type;
+}
+
+// Blocks until all pending (H2D) transfers are consumed or 'timeout' elapses.
+// Returns HAILO_STREAM_ABORTED_BY_USER if the channel is aborted while waiting,
+// HAILO_TIMEOUT on timeout. D2H channels have nothing buffered, so this is a
+// no-op for them.
+hailo_status BoundaryChannel::flush(const std::chrono::milliseconds &timeout)
+{
+    if (Direction::D2H == m_direction) {
+        // We are not buffering user data
+        return HAILO_SUCCESS;
+    }
+
+    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+    hailo_status status = HAILO_SUCCESS; // Best effort
+    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout, [this, &status] () {
+        if (m_state->m_is_aborted) {
+            status = HAILO_STREAM_ABORTED_BY_USER;
+            return true; // return true so that the wait will finish
+        }
+        return m_state->m_pending_buffers.empty();
+    });
+    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for channel {} interrupts on flush", m_channel_id);
+    return status;
+}
+
+// Readiness predicates used by wait(); for the base class both directions
+// reduce to descriptor-list capacity.
+bool BoundaryChannel::is_ready_for_transfer_h2d(size_t buffer_size)
+{
+    return has_room_in_desc_list(buffer_size);
+}
+
+bool BoundaryChannel::is_ready_for_transfer_d2h(size_t buffer_size)
+{
+    return has_room_in_desc_list(buffer_size);
+}
+
+// Returns true if both the pending-buffer queue and the circular descriptor
+// buffer can accommodate a transfer of 'buffer_size' bytes.
+bool BoundaryChannel::has_room_in_desc_list(size_t buffer_size)
+{
+    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer_size);
+    assert(desired_desc_num <= MAX_DESCS_COUNT);
+    int desc_num = static_cast<int>(desired_desc_num);
+
+    if (m_state->m_pending_buffers.full()) {
+        return false;
+    }
+
+    int num_available = get_num_available();
+    int num_processed = CB_TAIL(m_state->m_descs);
+
+    if (desc_num == m_state->m_descs.size) {
+        // Special case when checking if the buffer is empty
+        // (a full-size transfer fits only when head == tail)
+        return num_available == num_processed;
+    }
+
+    int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
+    if (num_free < desc_num) {
+        return false;
+    }
+
+    return true;
+}
+
+// Blocks until the channel can accept a transfer of 'buffer_size' bytes or
+// 'timeout' elapses. Returns HAILO_INVALID_ARGUMENT if the request exceeds the
+// descriptor list's total capacity, HAILO_STREAM_ABORTED_BY_USER if aborted
+// while waiting, HAILO_TIMEOUT on timeout.
+hailo_status BoundaryChannel::wait(size_t buffer_size, std::chrono::milliseconds timeout)
+{
+    const auto max_transfer_size = m_desc_list->desc_page_size() * m_desc_list->count();
+    CHECK(buffer_size < max_transfer_size, HAILO_INVALID_ARGUMENT,
+        "Requested transfer size ({}) must be smaller than ({})", buffer_size, max_transfer_size);
+
+    // Direction-specific readiness predicate (both virtual, overridable by impls)
+    auto is_ready_for_transfer = (Direction::H2D == m_direction) ?
+        std::bind(&BoundaryChannel::is_ready_for_transfer_h2d, this, buffer_size) :
+        std::bind(&BoundaryChannel::is_ready_for_transfer_d2h, this, buffer_size);
+
+    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+    hailo_status status = HAILO_SUCCESS; // Best effort
+    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout, [this, is_ready_for_transfer, &status] () {
+        if (m_state->m_is_aborted) {
+            status = HAILO_STREAM_ABORTED_BY_USER;
+            return true; // return true so that the wait will finish
+        }
+
+        return is_ready_for_transfer();
+    });
+    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for channel {} interrupts", m_channel_id);
+    return status;
+}
+
+// Sets how many transfers are batched per device (AXI) interrupt; must be > 0.
+hailo_status BoundaryChannel::set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr)
+{
+    CHECK(0 != transfers_per_axi_intr, HAILO_INVALID_ARGUMENT, "Invalid transfers per axi interrupt");
+    m_transfers_per_axi_intr = transfers_per_axi_intr;
+    return HAILO_SUCCESS;
+}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file boundary_channel.hpp
+ * @brief BoundaryChannel - vdma boundary channel interface
+ * The hierarchy is as follows:
+ * -------------------------------------------------------------------------
+ * | ChannelBase | (Base class - includes state) |
+ * | | | |
+ * | BoundaryChannel | (Boundary interface) |
+ * | / \ | |
+ * | AsyncChannel BufferedChannel | (Impls) |
+ * -------------------------------------------------------------------------
+ **/
+
+#ifndef _HAILO_VDMA_BOUNDARY_CHANNEL_HPP_
+#define _HAILO_VDMA_BOUNDARY_CHANNEL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/stream.hpp"
+
+#include "vdma/channel/channel_base.hpp"
+
+#include <memory>
+
+
+namespace hailort {
+namespace vdma {
+
+class BoundaryChannel;
+using BoundaryChannelPtr = std::shared_ptr<BoundaryChannel>;
+
+// Invoked with the number of frames completed since the previous notification.
+using ProcessingCompleteCallback = std::function<void(uint32_t frames_processed)>;
+
+// Common interface/base for vdma boundary channels (host <-> device data
+// channels). Concrete behavior lives in BufferedChannel (sync, copying) and
+// AsyncChannel (async, zero-copy); see the hierarchy diagram in the file header.
+class BoundaryChannel : public ChannelBase
+{
+public:
+    enum class Type
+    {
+        BUFFERED = 0,
+        ASYNC
+    };
+
+    // Factory: dispatches on 'type' to the matching concrete implementation.
+    static Expected<BoundaryChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
+        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,
+        uint16_t transfers_per_axi_intr = 1, Type type = Type::BUFFERED);
+
+    // 'status' receives the construction result (constructors don't throw here).
+    BoundaryChannel(Type type, vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
+        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,
+        hailo_status &status);
+    BoundaryChannel(const BoundaryChannel &other) = delete;
+    BoundaryChannel &operator=(const BoundaryChannel &other) = delete;
+    BoundaryChannel(BoundaryChannel &&other) = delete;
+    BoundaryChannel &operator=(BoundaryChannel &&other) = delete;
+    virtual ~BoundaryChannel() = default;
+
+    // Called after the FW activated the channel.
+    hailo_status activate(uint32_t transfer_size, bool resume_pending_transfers);
+
+    // Called before the FW deactivated the channel.
+    hailo_status deactivate();
+
+    Type type() const;
+
+    void clear_pending_buffers_descriptors();
+    // Completes pending transfers up to the HW's processed-descriptor index.
+    hailo_status trigger_channel_completion(uint16_t hw_num_processed);
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &callback);
+    CONTROL_PROTOCOL__host_buffer_info_t get_boundary_buffer_info(uint32_t transfer_size);
+    virtual hailo_status abort();
+    virtual hailo_status clear_abort();
+
+    // For D2H channels, we don't buffer data
+    // Hence there's nothing to be "flushed" and the function will return with HAILO_SUCCESS
+    virtual hailo_status flush(const std::chrono::milliseconds &timeout);
+    // Blocks until the channel can accept a transfer of buffer_size bytes.
+    virtual hailo_status wait(size_t buffer_size, std::chrono::milliseconds timeout);
+    hailo_status set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr);
+
+    // Synchronous (copying) transfer API - implemented by BufferedChannel only.
+    virtual hailo_status transfer(void *buf, size_t count) = 0;
+    // TODO: can write_buffer + send_pending_buffer move to BufferedChannel? (HRT-9105)
+    // Either write_buffer + send_pending_buffer or transfer (h2d) should be used on a given channel, not both
+    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
+        const std::function<bool()> &should_cancel) = 0;
+    virtual hailo_status send_pending_buffer() = 0;
+
+    // TODO: move buffer?
+    // TODO: If the same callback is used for different buffers we need a way to tell the transfers apart
+    // - Passing buffer to callback could do the trick. However, what will happen if the same buffer has been transferred twice?
+    // - Maybe add a unique transfer_id? At least unique in the context of the maximum number of ongoing transfers
+    // TODO: What if there's no more room in desc list so the transfer can't be programmed? Should the function block
+    // - Maybe define that if more than max_concurrent_transfers() (based on a param passed to create) the function will return a failure?
+    // When the transfer is complete (i.e. data is written to/from buffer with a D2H/H2D channel) callback is called
+    // buffer can't be freed until callback is called
+    virtual hailo_status transfer(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque) = 0;
+
+    // Calls all pending transfer callbacks (if they exist), marking them as canceled by passing hailo_async_transfer_completion_info_t{HAILO_STREAM_NOT_ACTIVATED}.
+    // Note: This function is to be called on a deactivated channel object. Calling on an active channel will lead to unexpected results
+    virtual hailo_status cancel_pending_transfers() = 0;
+
+    virtual void notify_all() = 0;
+
+    // Debug snapshot of descriptor-buffer bookkeeping (see get_buffer_state()).
+    class BufferState {
+    public:
+        std::vector<std::pair<uint16_t, Buffer>> desc_buffer_pairing;
+        uint16_t num_avail;
+        uint16_t num_processed;
+        uint16_t hw_num_avail;
+        uint16_t hw_num_processed;
+    };
+
+    // Assumes that the channel is idle; doesn't block changes to the channel
+    // To be used for debugging purposes
+    // TODO: these will move to BufferedChannel (HRT-9105)
+    virtual Expected<BufferState> get_buffer_state() = 0;
+    virtual Expected<size_t> get_h2d_pending_frames_count() = 0;
+    virtual Expected<size_t> get_d2h_pending_descs_count() = 0;
+
+protected:
+    // Default interrupt callback - discards the frame count.
+    static void ignore_processing_complete(uint32_t) {}
+    void stop_interrupts_thread(std::unique_lock<RecursiveSharedMutex> &lock);
+    virtual bool is_ready_for_transfer_h2d(size_t buffer_size);
+    virtual bool is_ready_for_transfer_d2h(size_t buffer_size);
+
+    // Called after activate/deactivate with the state mutex held
+    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) = 0;
+    virtual hailo_status complete_channel_deactivation() = 0;
+
+    const Type m_type;
+    TransferDoneCallback m_transfer_done_callback;
+    ProcessingCompleteCallback m_user_interrupt_callback;
+    uint16_t m_transfers_per_axi_intr;
+
+private:
+    bool has_room_in_desc_list(size_t buffer_size);
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif // _HAILO_VDMA_BOUNDARY_CHANNEL_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file buffered_channel.cpp
+ * @brief Implementation of the BufferedChannel class
+ **/
+
+#include "hailo/hailort_common.hpp"
+
+#include "common/logger_macros.hpp"
+
+#include "vdma/channel/buffered_channel.hpp"
+#include "vdma/memory/mapped_buffer_factory.hpp"
+#include "vdma/memory/mapped_buffer_impl.hpp"
+#include "hw_consts.hpp"
+
+#include <list>
+#include <chrono>
+#include <thread>
+#include <iostream>
+
+
+namespace hailort {
+namespace vdma {
+
+// Factory for BufferedChannel. Construction failures are reported through the ctor's
+// status out-parameter and converted here into a failed Expected.
+Expected<BufferedChannelPtr> BufferedChannel::create(vdma::ChannelId channel_id, Direction direction,
+    HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto channel_ptr = make_shared_nothrow<vdma::BufferedChannel>(channel_id, direction, driver, descs_count,
+        desc_page_size, stream_name, latency_meter, transfers_per_axi_intr, status);
+    // make_shared_nothrow returns nullptr on allocation failure (no exception)
+    CHECK_NOT_NULL_AS_EXPECTED(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating BufferedChannel");
+
+    return channel_ptr;
+}
+
+// Constructs the channel and its backing cyclic buffer. On any failure, 'status' is set
+// to the failing code and construction stops early (callers must check 'status').
+BufferedChannel::BufferedChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
+    uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
+    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status) :
+    BoundaryChannel(BoundaryChannel::Type::BUFFERED, channel_id, direction, driver, descs_count, desc_page_size,
+        stream_name, latency_meter, transfers_per_axi_intr, status),
+    m_channel_buffer(nullptr),
+    m_pending_buffers_sizes(0),
+    m_pending_num_avail_offset(0)
+{
+    // Check that base constructor was successful
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed building boundary channel base class");
+        return;
+    }
+
+    // Allocate and dma-map the channel buffer (sized descs_count * desc_page_size, see create_mapped_buffer)
+    auto mapped_buffer = create_mapped_buffer(descs_count, desc_page_size, direction, driver);
+    if (!mapped_buffer) {
+        LOGGER__ERROR("Failed building mapped vdma buffer");
+        status = mapped_buffer.status();
+        return;
+    }
+    m_channel_buffer = mapped_buffer.release();
+
+    // Bind the buffer to the descriptor list starting at desc index 0
+    status = m_desc_list->configure_to_use_buffer(*m_channel_buffer, channel_id, 0);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed binding vdma buffer to desc list");
+        return;
+    }
+
+    // Replace the placeholder (constructed above with capacity 0) with the real capacity
+    m_pending_buffers_sizes = CircularArray<size_t>(descs_count);
+
+    status = HAILO_SUCCESS;
+}
+
+// Allocates a dma-mapped buffer of descs_count * desc_page_size bytes for the channel.
+// The page size is normalized by the driver and must be a power of two.
+Expected<std::shared_ptr<DmaMappedBuffer>> BufferedChannel::create_mapped_buffer(uint32_t descs_count, uint16_t desc_page_size,
+    Direction direction, HailoRTDriver &driver)
+{
+    auto desc_page_size_value = driver.calc_desc_page_size(desc_page_size);
+    CHECK_AS_EXPECTED(is_powerof2(desc_page_size_value), HAILO_INVALID_ARGUMENT, "Descriptor page_size must be a power of two.");
+
+    auto mapped_buffer_exp = MappedBufferFactory::create_mapped_buffer(descs_count * desc_page_size_value, direction, driver);
+    CHECK_EXPECTED(mapped_buffer_exp);
+
+    // Wrap in a shared_ptr so the buffer can outlive local scopes (it is bound to the desc list)
+    auto mapped_buffer = make_shared_nothrow<DmaMappedBuffer>(mapped_buffer_exp.release());
+    CHECK_NOT_NULL_AS_EXPECTED(mapped_buffer, HAILO_OUT_OF_HOST_MEMORY);
+
+    return mapped_buffer;
+}
+
+// Called (with the state mutex held, per the base-class contract) after the channel is
+// deactivated. Saves the hw counters so a later activation can resume pending transfers.
+hailo_status BufferedChannel::complete_channel_deactivation()
+{
+    const auto status = store_channel_buffer_state();
+    CHECK_SUCCESS(status);
+
+    if (Direction::H2D == m_direction) {
+        clear_pending_buffers_descriptors();
+        // For H2D channels we reset counters as we want to allow writes to the start of the buffer while the channel is stopped
+        m_state->reset_counters();
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Snapshots the hw's num_processed counter into m_previous_tail (accumulated modulo the
+// descs-ring size) so that complete_channel_activation() can rebind the desc list to
+// resume exactly where the hw left off. m_desc_list_delta records how far the tail moved.
+hailo_status BufferedChannel::store_channel_buffer_state()
+{
+    // TODO: If a D2H channel is deactivated before all of it's pending frames have recv'd ints
+    // we'll store a tail value that won't be up to date when the channel is activated again.
+    // Potentially, we might overwrite frames in that situation. Note that we can't flush() in the case
+    // of D2H channels (as we can with H2D channels), because num_avail may be greater than the number of frames
+    // that will be recv'd on a given channel. E.g., upon channel activation for the first time we call
+    // prepare_d2h_pending_descriptors with the maximum number of descs possible for this channel, which will
+    // accommodate X frames. If the user only sends Y < X frames on the input channel, only Y output frames will
+    // be recv'd (assuming one output frame per input frame). Hence, flush() won't return (we won't dequeue all
+    // pending buffers). This needs to be handled by the sched that uses this feature. (HRT-9456)
+    auto tail = get_hw_num_processed();
+    CHECK_EXPECTED_AS_STATUS(tail);
+
+    const auto temp = m_state->m_previous_tail;
+    m_state->m_previous_tail = (tail.value() + m_state->m_previous_tail) & m_state->m_descs.size_mask;
+    m_state->m_desc_list_delta = temp - m_state->m_previous_tail;
+
+    return HAILO_SUCCESS;
+}
+
+// Returns the number of frames written into the channel buffer that were not sent yet.
+// Each entry in m_pending_buffers_sizes corresponds to one such frame.
+Expected<size_t> BufferedChannel::get_h2d_pending_frames_count()
+{
+    size_t pending_frames_count = m_pending_buffers_sizes.size();
+    return pending_frames_count;
+}
+
+// Returns the number of descriptors the hw has completed (up to the ring tail) that the
+// user hasn't consumed yet (relative to m_d2h_read_desc_index). Takes the state mutex.
+Expected<size_t> BufferedChannel::get_d2h_pending_descs_count()
+{
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+    int num_proc = CB_TAIL(m_state->m_descs);
+    // CB_PROG counts the progress from the read index up to the hw tail
+    int desc_num_ready = CB_PROG(m_state->m_descs, num_proc, m_state->m_d2h_read_desc_index);
+
+    return desc_num_ready;
+}
+
+// Pre-programs 'transfers_count' receive transfers of 'transfer_size' bytes each, so the
+// hw can write incoming D2H frames without waiting for the user to call transfer().
+hailo_status BufferedChannel::prepare_d2h_pending_descriptors(uint32_t transfer_size, uint32_t transfers_count)
+{
+    // on D2H no need for interrupt of first descriptor
+    const auto first_desc_interrupts_domain = InterruptsDomain::NONE;
+    for (uint32_t i = 0; i < transfers_count; i++) {
+        // Provide FW interrupt only in the end of the last transfer in the batch
+        auto last_desc_interrutps_domain =
+            (static_cast<uint32_t>(m_transfers_per_axi_intr - 1) == (i % m_transfers_per_axi_intr)) ?
+            InterruptsDomain::BOTH : InterruptsDomain::HOST;
+        auto status = prepare_descriptors(transfer_size, first_desc_interrupts_domain, last_desc_interrutps_domain);
+        if (HAILO_STREAM_NOT_ACTIVATED == status) {
+            // Not an error - the channel was simply deactivated mid-preparation
+            LOGGER__INFO("preparing descriptors failed because channel is not activated");
+            return status;
+        }
+        CHECK_SUCCESS(status, "Failed prepare desc status={}", status);
+    }
+
+    // We assume each output transfer is in the same size
+    // NOTE(review): the '+=' here adds the modulo result onto the accumulator rather than
+    // assigning it; with the default m_transfers_per_axi_intr == 1 the modulo is always 0 so
+    // this is a no-op, but the intended semantics for larger values should be confirmed.
+    m_state->m_accumulated_transfers += ((m_state->m_accumulated_transfers + transfers_count) % m_transfers_per_axi_intr);
+
+    return HAILO_SUCCESS;
+}
+
+// Called (with the state mutex held, per the base-class contract) after the channel is
+// activated. Optionally rebinds the desc list so the hw resumes exactly where it left off,
+// and for D2H channels pre-programs receive descriptors for the coming frames.
+hailo_status BufferedChannel::complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers)
+{
+    auto status = HAILO_UNINITIALIZED;
+
+    // We should have no active transfers now
+    if (resume_pending_transfers) {
+        // We want the first descriptor (at index zero) to point to where the descriptor at index
+        // m_state->m_previous_tail currently points to:
+        // * In the case of a D2H channel, m_state->m_previous_tail is the index of the desc where the hw would next
+        //   write to (num_proc). Hence, the hw will now write exactly where it left off. Previously unread frames from
+        //   the user (pointed to by m_state->m_d2h_read_desc_index) can still be read (the hw won't overwrite them).
+        // * In the case of a H2D channel, m_state->m_previous_tail is the index of the desc where the hw would next
+        //   read from (num_proc). Hence, the hw will now read exactly where it left off. Previously written frames
+        //   from the user (that appear before m_state->m_previous_tail), will not be re-written.
+        const uint32_t starting_desc_offset = (m_desc_list->count() - m_state->m_previous_tail) % m_desc_list->count();
+        status = m_desc_list->configure_to_use_buffer(*m_channel_buffer, m_channel_id,
+            starting_desc_offset);
+        CHECK_SUCCESS(status);
+
+        if (Direction::D2H == m_direction) {
+            // m_d2h_read_desc_index, which is relative to the first desc, needs to shift by m_desc_list_delta
+            m_state->m_d2h_read_desc_index = (m_state->m_d2h_read_desc_index + m_state->m_desc_list_delta) & m_state->m_descs.size_mask;
+        }
+    } else {
+        // We're not resuming pending transfers - clear relevant pointers.
+        m_state->reset_previous_state_counters();
+    }
+
+    if ((Direction::D2H == m_direction) && (transfer_size != 0)) {
+        const auto transfers_in_buffer = get_transfers_count_in_buffer(transfer_size);
+        const auto pending_descs = get_d2h_pending_descs_count();
+        const auto descs_in_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
+        const auto pending_transfers = pending_descs.value() / descs_in_transfer;
+        // We prepare descs in advance for D2H channels:
+        // (1) The channel's buffer can store up to 'transfers_in_buffer' frames of size transfer_size
+        // (2) There are 'pending_transfers' frames from the previous channel activation (we assume that the same
+        //     'transfer_size' was used)
+        // (3) Hence, we have room for 'transfers_in_buffer - pending_transfers' frames in the buffer currently.
+        // (4) However, we can allow at most 'm_state->m_pending_buffers.capacity()' transfers. We can't store more than
+        //     that in the pending buffers circular array.
+        // (5) Hence, we'll take the minimum between (3) and (4).
+        const auto transfers_count = std::min(transfers_in_buffer - pending_transfers,
+            m_state->m_pending_buffers.capacity());
+        status = prepare_d2h_pending_descriptors(transfer_size, static_cast<uint32_t>(transfers_count));
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Synchronous single-frame transfer. Validates the arguments, rejects aborted channels,
+// then dispatches to the direction-specific implementation under the state mutex.
+hailo_status BufferedChannel::transfer(void *buf, size_t count)
+{
+    CHECK_NOT_NULL(buf, HAILO_INVALID_ARGUMENT);
+    CHECK(0 != count, HAILO_INVALID_ARGUMENT);
+
+    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+    if (m_state->m_is_aborted) {
+        LOGGER__INFO("Tried to write to aborted channel {}", m_channel_id);
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+
+    const hailo_status transfer_status = (Direction::H2D == m_direction) ?
+        transfer_h2d(buf, count) :
+        transfer_d2h(buf, count);
+
+    // A deactivated channel is an expected condition - log at info level only
+    if (HAILO_STREAM_NOT_ACTIVATED == transfer_status) {
+        LOGGER__INFO("Transfer failed because Channel {} is not activated", m_channel_id);
+        return HAILO_STREAM_NOT_ACTIVATED;
+    }
+    if (HAILO_SUCCESS != transfer_status) {
+        LOGGER__ERROR("Transfer failed for channel {} with status {}", m_channel_id, transfer_status);
+        return transfer_status;
+    }
+    return HAILO_SUCCESS;
+}
+
+// Copies 'buffer' into the cyclic channel buffer (at the first slot past all already-pending
+// writes) and records it as a pending frame. Descriptors are programmed later by
+// send_pending_buffer_impl(). Caller must hold the state mutex.
+hailo_status BufferedChannel::write_buffer_impl(const MemoryView &buffer)
+{
+    // Validate capacity up front: previously this was checked only *after* the data had been
+    // copied and m_pending_num_avail_offset bumped, so a full pending-buffers queue left the
+    // channel state inconsistent. Checking first makes the failure path side-effect free.
+    CHECK(!m_pending_buffers_sizes.full(), HAILO_INVALID_OPERATION, "Cannot add more pending buffers!");
+
+    const uint32_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer.size());
+    // Account for descs reserved by previous write_buffer calls that weren't sent yet
+    const uint32_t desc_avail = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
+    assert(desired_desc_num <= MAX_DESCS_COUNT);
+    assert(CB_AVAIL(m_state->m_descs, desc_avail, CB_TAIL(m_state->m_descs)) >= desired_desc_num);
+
+    // Byte offset in the cyclic buffer corresponding to the next free descriptor
+    const size_t buffer_write_offset = ((desc_avail + m_state->m_previous_tail) & m_state->m_descs.size_mask) * m_desc_list->desc_page_size();
+    const auto status = write_to_channel_buffer_cyclic(buffer, buffer_write_offset);
+    CHECK_SUCCESS(status);
+
+    m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset + desired_desc_num);
+    m_pending_buffers_sizes.push_back(buffer.size());
+    return HAILO_SUCCESS;
+}
+
+// Copies 'buffer' into the cyclic channel buffer starting at 'channel_buffer_write_offset',
+// wrapping around to offset 0 if the write crosses the end of the buffer. Each copied chunk
+// is synchronized through the mapped buffer so the device observes the new data.
+hailo_status BufferedChannel::write_to_channel_buffer_cyclic(const MemoryView &buffer, size_t channel_buffer_write_offset)
+{
+    CHECK(buffer.size() <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
+        "Can't write {} bytes to channel buffer (channel buffer size {})",
+        buffer.size(), m_channel_buffer->size());
+
+    auto *const channel_buffer_base = static_cast<uint8_t *>(m_channel_buffer->user_address());
+    const size_t bytes_until_wrap = m_channel_buffer->size() - channel_buffer_write_offset;
+    const size_t head_size = std::min(bytes_until_wrap, buffer.size());
+
+    // Head chunk: from the write offset up to (at most) the end of the cyclic buffer
+    std::memcpy(channel_buffer_base + channel_buffer_write_offset, buffer.data(), head_size);
+    auto status = m_channel_buffer->pimpl->synchronize(channel_buffer_write_offset, head_size);
+    CHECK_SUCCESS(status);
+
+    // Tail chunk (only on wrap-around): the remainder goes to the start of the buffer
+    const size_t tail_size = buffer.size() - head_size;
+    if (0 < tail_size) {
+        std::memcpy(channel_buffer_base, buffer.data() + head_size, tail_size);
+        status = m_channel_buffer->pimpl->synchronize(0, tail_size);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Reads 'read_size' bytes from the cyclic channel buffer starting at
+// 'channel_buffer_read_offset' into 'dest_buffer', wrapping around to offset 0 if the read
+// crosses the end of the buffer. Each chunk is synchronized via the mapped buffer before
+// copying, so the host observes the data last written by the device.
+hailo_status BufferedChannel::read_from_channel_buffer_cyclic(uint8_t *dest_buffer, size_t read_size, size_t channel_buffer_read_offset)
+{
+    CHECK(read_size <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
+        "Can't read {} bytes from channel buffer (channel buffer size {})",
+        read_size, m_channel_buffer->size());
+
+    const auto size_to_end = m_channel_buffer->size() - channel_buffer_read_offset;
+    const auto first_chunk_size = std::min(size_to_end, read_size);
+    const auto first_chunk_addr = static_cast<uint8_t *>(m_channel_buffer->user_address()) + channel_buffer_read_offset;
+
+    // Synchronize m_channel_buffer and copy to dest_buffer
+    auto status = m_channel_buffer->pimpl->synchronize(channel_buffer_read_offset, first_chunk_size);
+    CHECK_SUCCESS(status);
+    std::memcpy(dest_buffer, first_chunk_addr, first_chunk_size);
+
+    const auto remaining_size = read_size - first_chunk_size;
+    if (remaining_size > 0) {
+        // Synchronize m_channel_buffer and copy remainder (wrap-around) to dest_buffer
+        status = m_channel_buffer->pimpl->synchronize(0, remaining_size);
+        CHECK_SUCCESS(status);
+        std::memcpy(dest_buffer + first_chunk_size, m_channel_buffer->user_address(), remaining_size);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Debug helper: snapshots the sw counters (CB head/tail), the hw registers, and the full
+// channel buffer contents, pairing each desc-page-sized chunk with its descriptor index.
+// NOTE(review): counters are read without taking the state mutex - per the header comment
+// this assumes the channel is idle while being inspected.
+Expected<BoundaryChannel::BufferState> BufferedChannel::get_buffer_state()
+{
+    BoundaryChannel::BufferState result;
+    result.num_avail = static_cast<uint16_t>(CB_HEAD(m_state->m_descs));
+    result.num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
+    auto hw_num_avail = m_host_registers.get_num_available();
+    CHECK_EXPECTED(hw_num_avail);
+    result.hw_num_avail = hw_num_avail.release();
+    auto hw_num_processed = get_hw_num_processed();
+    CHECK_EXPECTED(hw_num_processed);
+    result.hw_num_processed = hw_num_processed.release();
+
+    // Get a snapshot of the channel buffer
+    auto channel_buffer_copy = Buffer::create(m_channel_buffer->size());
+    CHECK_EXPECTED(channel_buffer_copy);
+    const auto status = read_from_channel_buffer_cyclic(channel_buffer_copy->data(), channel_buffer_copy->size(), 0);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Pair each desc-page-sized chunk with its desc index, relative to m_previous_tail
+    // (the desc list was rebound to start at m_previous_tail on the last activation)
+    for (size_t offset = 0; offset < channel_buffer_copy->size(); offset += m_desc_list->desc_page_size()) {
+        auto chunk = Buffer::create(channel_buffer_copy->data() + offset, m_desc_list->desc_page_size());
+        CHECK_EXPECTED(chunk);
+        const auto abs_index = offset / m_desc_list->desc_page_size();
+        const auto desc_num = (abs_index >= static_cast<uint16_t>(m_state->m_previous_tail)) ?
+            abs_index - m_state->m_previous_tail :
+            m_state->m_descs.size - m_state->m_previous_tail + abs_index;
+        result.desc_buffer_pairing.emplace_back(static_cast<uint16_t>(desc_num), chunk.release());
+    }
+
+    return result;
+}
+
+// Blocking H2D write: waits (up to 'timeout') until the channel has room for the frame -
+// both free descriptors and a free slot in the pending-buffers queue - then copies the
+// frame into the channel buffer. The wait is woken by channel aborts, by 'should_cancel'
+// becoming true, or by descriptors being freed (see send_pending_buffer / notify_all).
+hailo_status BufferedChannel::write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
+    const std::function<bool()> &should_cancel)
+{
+    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+    // Checking in advance so as not to timeout
+    CHECK(buffer.size() <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
+        "Can't write {} bytes to channel buffer (channel buffer size {})",
+        buffer.size(), m_channel_buffer->size());
+
+    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer.size());
+    hailo_status channel_completion_status = HAILO_SUCCESS;
+    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout, [this, desired_desc_num,
+        &should_cancel, &channel_completion_status] () {
+        // Stop waiting immediately on abort (handled after the wait returns)
+        if (m_state->m_is_aborted) {
+            return true;
+        }
+
+        if (should_cancel()) {
+            channel_completion_status = HAILO_STREAM_ABORTED_BY_USER;
+            return true;
+        }
+        // Limit writes to not surpass size of m_pending_buffers
+        size_t written_buffers_count = m_pending_buffers_sizes.size();
+        size_t sent_buffers_count = m_state->m_pending_buffers.size();
+        if (written_buffers_count + sent_buffers_count >= m_state->m_pending_buffers.capacity()) {
+            return false;
+        }
+
+        return is_ready_for_write(static_cast<uint16_t>(desired_desc_num));
+    });
+    if (m_state->m_is_aborted || (HAILO_STREAM_ABORTED_BY_USER == channel_completion_status)) {
+        LOGGER__INFO("wait_for in write_buffer was aborted!");
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for descriptors in write_buffer (channel_id={})",
+        m_channel_id);
+    CHECK_SUCCESS(channel_completion_status);
+
+    return write_buffer_impl(buffer);
+}
+
+// Programs descriptors for the oldest buffer written via write_buffer_impl() and hands it
+// to the hw. On HAILO_STREAM_NOT_ACTIVATED all pending (unsent) buffers are dropped since
+// their data cannot be sent anymore. Caller must hold the state mutex.
+hailo_status BufferedChannel::send_pending_buffer_impl()
+{
+    CHECK(!m_pending_buffers_sizes.empty(), HAILO_INVALID_OPERATION, "There are no pending buffers to send!");
+
+    // For h2d, only the host need to get transfer done interrupts
+    InterruptsDomain last_desc_interrupts_domain = InterruptsDomain::HOST;
+    // If we measure latency, we need interrupt on the first descriptor
+    InterruptsDomain first_desc_interrupts_domain = (m_latency_meter != nullptr) ?
+        InterruptsDomain::HOST : InterruptsDomain::NONE;
+
+    auto status = prepare_descriptors(m_pending_buffers_sizes.front(), first_desc_interrupts_domain, last_desc_interrupts_domain);
+    if (HAILO_STREAM_NOT_ACTIVATED == status) {
+        LOGGER__INFO("sending pending buffer failed because stream is not activated");
+        // Stream was aborted during transfer - reset pending buffers
+        m_pending_num_avail_offset = 0;
+        while (m_pending_buffers_sizes.size() > 0) {
+            m_pending_buffers_sizes.pop_front();
+        }
+        return status;
+    }
+    CHECK_SUCCESS(status);
+    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
+
+    // The descs for this buffer are now accounted for by the ring's num_avail,
+    // so remove them from the "written but not sent" offset
+    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(m_pending_buffers_sizes.front());
+    m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset - desired_desc_num);
+
+    m_pending_buffers_sizes.pop_front();
+
+    return HAILO_SUCCESS;
+}
+
+// Public wrapper around send_pending_buffer_impl(): performs the send under the state
+// mutex, then wakes one writer that may be waiting for room in write_buffer().
+hailo_status BufferedChannel::send_pending_buffer()
+{
+    {
+        // Scope the lock so the cv is notified after the mutex is released
+        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+
+        const auto status = send_pending_buffer_impl();
+        if (HAILO_STREAM_NOT_ACTIVATED == status) {
+            LOGGER__INFO("stream is not activated");
+            return HAILO_STREAM_NOT_ACTIVATED;
+        }
+        CHECK_SUCCESS(status);
+    }
+    m_state->transfer_buffer_cv().notify_one();
+
+    return HAILO_SUCCESS;
+}
+
+// The async (callback-based) transfer API is not supported by BufferedChannel
+hailo_status BufferedChannel::transfer(std::shared_ptr<DmaMappedBuffer>, const TransferDoneCallback &, void *)
+{
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// Not supported: BufferedChannel has no async transfers to cancel (see transfer overload above)
+hailo_status BufferedChannel::cancel_pending_transfers()
+{
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// Synchronous H2D transfer: a write into the channel buffer immediately followed by
+// programming its descriptors (equivalent to write_buffer + send_pending_buffer in one call).
+// Caller must hold the state mutex (see transfer()).
+hailo_status BufferedChannel::transfer_h2d(void *buf, size_t count)
+{
+    const auto write_status = write_buffer_impl(MemoryView(buf, count));
+    CHECK_SUCCESS(write_status);
+
+    const auto send_status = send_pending_buffer_impl();
+    if (HAILO_STREAM_NOT_ACTIVATED == send_status) {
+        // Expected condition - propagate without logging an error
+        return send_status;
+    }
+    CHECK_SUCCESS(send_status);
+
+    return HAILO_SUCCESS;
+}
+
+// Synchronous D2H transfer: copies one completed frame out of the channel buffer (fails
+// with HAILO_OUT_OF_DESCRIPTORS if the hw hasn't completed enough descriptors yet),
+// advances the read indices, and re-arms descriptors for the next incoming frame.
+// Caller must hold the state mutex (see transfer()).
+hailo_status BufferedChannel::transfer_d2h(void *buf, size_t count)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    // Provide FW interrupt only in the end of the last transfer in the batch
+    InterruptsDomain first_desc_interrupts_domain = InterruptsDomain::NONE;
+    InterruptsDomain last_desc_interrupts_domain = (m_state->m_accumulated_transfers + 1 == m_transfers_per_axi_intr) ?
+        InterruptsDomain::BOTH : InterruptsDomain::HOST;
+
+    auto desired_desc_num = m_desc_list->descriptors_in_buffer(count);
+    assert(desired_desc_num <= MAX_DESCS_COUNT);
+    int desc_num = static_cast<int>(desired_desc_num);
+
+    // Frame is ready only if the hw tail has progressed desc_num descs past our read index
+    int num_processed = CB_TAIL(m_state->m_descs);
+    int num_ready = CB_PROG(m_state->m_descs, num_processed, m_state->m_d2h_read_desc_index);
+    CHECK(num_ready >= desc_num, HAILO_OUT_OF_DESCRIPTORS,
+        "{} descriptors desired but only {} available", desc_num, num_ready);
+
+    // m_d2h_read_desc_index_abs is the absolute position in the buffer (not shifted on re-activation)
+    const auto channel_buffer_read_offset = m_state->m_d2h_read_desc_index_abs * m_desc_list->desc_page_size();
+    status = read_from_channel_buffer_cyclic(static_cast<uint8_t *>(buf), count, channel_buffer_read_offset);
+    CHECK_SUCCESS(status);
+
+    m_state->m_d2h_read_desc_index = (m_state->m_d2h_read_desc_index + desc_num) & m_state->m_descs.size_mask;
+    m_state->m_d2h_read_desc_index_abs = (m_state->m_d2h_read_desc_index_abs + desc_num) & m_state->m_descs.size_mask;
+
+    // prepare descriptors for next recv
+    if (m_state->m_is_channel_activated) {
+        status = prepare_descriptors(count, first_desc_interrupts_domain, last_desc_interrupts_domain);
+        if (HAILO_STREAM_NOT_ACTIVATED == status) {
+            LOGGER__INFO("transfer d2h failed because stream is not activated");
+            return status;
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
+
+    return HAILO_SUCCESS;
+}
+
+// Programs the descriptor list for one transfer of 'transfer_size' bytes starting at the
+// current num_available index, registers it as a pending buffer, and advances num_available
+// (mirrored to the hw register). Fails fast if the channel is deactivated or the ring has
+// fewer free descriptors than the transfer needs. Caller must hold the state mutex.
+hailo_status BufferedChannel::prepare_descriptors(size_t transfer_size, InterruptsDomain first_desc_interrupts_domain,
+    InterruptsDomain last_desc_interrupts_domain)
+{
+    if (!m_state->m_is_channel_activated) {
+        return HAILO_STREAM_NOT_ACTIVATED;
+    }
+
+    // Calculate desired descriptors for the buffer
+    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(transfer_size);
+    assert(desired_desc_num <= MAX_DESCS_COUNT);
+    uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);
+
+    int num_available = get_num_available();
+    int num_processed = CB_TAIL(m_state->m_descs);
+    int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
+    if (num_free < desc_num) {
+        return HAILO_OUT_OF_DESCRIPTORS;
+    }
+
+    if (nullptr != m_latency_meter) {
+        // Program first descriptor (to get the interrupt needed for latency measurement)
+        m_desc_list->program_single_descriptor((*m_desc_list)[num_available], m_desc_list->desc_page_size(),
+            first_desc_interrupts_domain);
+    }
+    auto actual_desc_count = m_desc_list->program_last_descriptor(transfer_size, last_desc_interrupts_domain,
+        num_available, true);
+    if (!actual_desc_count) {
+        LOGGER__ERROR("Failed to program desc_list for channel {}", m_channel_id);
+        return actual_desc_count.status();
+    }
+    assert (actual_desc_count.value() == desc_num);
+    int last_desc_avail = ((num_available + desc_num - 1) & m_state->m_descs.size_mask);
+
+    // Record the transfer so the interrupt handler can invoke its completion callback
+    m_state->add_pending_buffer(num_available, last_desc_avail, m_direction, m_transfer_done_callback);
+    return inc_num_available(desc_num);
+}
+
+// True when the channel can accept a write of 'desired_desc_num' descriptors: the
+// pending-buffers queue has a free slot AND the ring has enough free descs, counting
+// the descs already reserved by written-but-unsent buffers (m_pending_num_avail_offset).
+bool BufferedChannel::is_ready_for_write(const uint16_t desired_desc_num)
+{
+    if (m_state->m_pending_buffers.full()) {
+        return false;
+    }
+
+    const uint32_t num_avail_with_pending = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
+    const int free_descs = CB_AVAIL(m_state->m_descs, num_avail_with_pending, CB_TAIL(m_state->m_descs));
+    return (free_descs >= desired_desc_num);
+}
+
+// True when a full frame of 'buffer_size' bytes can be read out: the pending-buffers
+// queue has room and the hw has completed enough descriptors past the read index.
+bool BufferedChannel::is_ready_for_transfer_d2h(size_t buffer_size)
+{
+    const size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer_size);
+    assert(desired_desc_num <= MAX_DESCS_COUNT);
+
+    if (m_state->m_pending_buffers.full()) {
+        return false;
+    }
+
+    const int num_processed = CB_TAIL(m_state->m_descs);
+    const int ready_descs = CB_PROG(m_state->m_descs, num_processed, m_state->m_d2h_read_desc_index);
+    return (ready_descs >= static_cast<int>(desired_desc_num));
+}
+
+// Wakes every thread blocked on the channel's condition variable (e.g. write_buffer waiters)
+void BufferedChannel::notify_all()
+{
+    {
+        // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
+        // (the empty scope is intentional: lock and immediately release before notifying)
+        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+    }
+    m_state->transfer_buffer_cv().notify_all();
+}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file buffered_channel.hpp
+ * @brief BufferedChannel - Implements the BoundaryChannel interface, allowing for buffering of frames
+ * by managing a vdma buffer
+ **/
+
+#ifndef _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
+#define _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/dma_mapped_buffer.hpp"
+
+#include "vdma/channel/boundary_channel.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+class BufferedChannel;
+using BufferedChannelPtr = std::shared_ptr<BufferedChannel>;
+
+// BoundaryChannel implementation that owns a dma-mapped cyclic buffer sized
+// descs_count * desc_page_size. H2D frames are staged in this buffer before being handed
+// to the hw; D2H frames are received into it and copied out on transfer().
+class BufferedChannel : public BoundaryChannel
+{
+public:
+    static Expected<BufferedChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
+        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,
+        uint16_t transfers_per_axi_intr = 1);
+
+    // Reports failures via 'status' (prefer create()); non-copyable and non-movable
+    BufferedChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
+        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status);
+    BufferedChannel(const BufferedChannel &other) = delete;
+    BufferedChannel &operator=(const BufferedChannel &other) = delete;
+    BufferedChannel(BufferedChannel &&other) = delete;
+    BufferedChannel &operator=(BufferedChannel &&other) = delete;
+    virtual ~BufferedChannel() = default;
+
+    virtual hailo_status transfer(void *buf, size_t count) override;
+    // Either write_buffer + send_pending_buffer or transfer (h2d) should be used on a given channel, not both
+    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
+        const std::function<bool()> &should_cancel) override;
+    virtual hailo_status send_pending_buffer() override;
+    // Async transfers are not supported by this class (returns HAILO_NOT_IMPLEMENTED)
+    virtual hailo_status transfer(std::shared_ptr<DmaMappedBuffer>, const TransferDoneCallback &, void *) override;
+    virtual hailo_status cancel_pending_transfers() override;
+    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) override;
+    virtual hailo_status complete_channel_deactivation() override;
+
+    // Assumes that the channel is idle; doesn't block changes to the channel
+    // To be used for debugging purposes
+    virtual Expected<BoundaryChannel::BufferState> get_buffer_state() override;
+    virtual Expected<size_t> get_h2d_pending_frames_count() override;
+    virtual Expected<size_t> get_d2h_pending_descs_count() override;
+
+    virtual void notify_all() override;
+
+private:
+    static Expected<std::shared_ptr<DmaMappedBuffer>> create_mapped_buffer(uint32_t descs_count, uint16_t desc_page_size,
+        Direction direction, HailoRTDriver &driver);
+
+    hailo_status transfer_h2d(void *buf, size_t count);
+    hailo_status write_buffer_impl(const MemoryView &buffer);
+    hailo_status write_to_channel_buffer_cyclic(const MemoryView &buffer, size_t channel_buffer_write_offset);
+    hailo_status read_from_channel_buffer_cyclic(uint8_t *dest_buffer, size_t read_size, size_t channel_buffer_read_offset);
+    hailo_status send_pending_buffer_impl();
+    hailo_status transfer_d2h(void *buf, size_t count);
+    hailo_status prepare_descriptors(size_t transfer_size, InterruptsDomain first_desc_interrupts_domain,
+        InterruptsDomain last_desc_interrupts_domain);
+    hailo_status prepare_d2h_pending_descriptors(uint32_t transfer_size, uint32_t transfers_count);
+    bool is_ready_for_write(const uint16_t desired_desc_num);
+    virtual bool is_ready_for_transfer_d2h(size_t buffer_size) override;
+    hailo_status store_channel_buffer_state();
+
+    // TODO: m_channel_buffer gets bound to ChannelBase::m_desc_list meaning the desc in that list point to dma addrs
+    // that back m_channel_buffer. Because ChannelBase gets dtor'd after BufferedChannel, m_channel_buffer ChannelBase::m_desc_list
+    // will point to a freed buffer. This is ok because the channel objects only get dtor'd after they are deactivated by the fw.
+    // Might want to enforce this in hailort as well (e.g. desc lists can hold shared_ptrs to DmaMappedBuffer while they are bound).
+    // (HRT-9110)
+    std::shared_ptr<DmaMappedBuffer> m_channel_buffer;
+    // Using CircularArray because it won't allocate or free memory while pushing and popping. The fact that it is circular is not relevant here
+    CircularArray<size_t> m_pending_buffers_sizes;
+    // Descs reserved by buffers written via write_buffer() but not yet sent to the hw
+    std::atomic_uint16_t m_pending_num_avail_offset;
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif // _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file channel_base.cpp
+ * @brief Base class of Boundary Channel - responsible for all the basic vdma channel functionality that interacts with the
+ * driver and the registers
+ * The hierarchy is as follows:
+ * --------------------------------------------------------------------------------------------------------------
+ * | ChannelBase | (Base class - includes state and buffers)
+ * | | |
+ * | BoundaryChannel | (handles Boundary channels)
+ * --------------------------------------------------------------------------------------------------------------
+ **/
+#include "vdma/channel/channel_base.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+// Base-class constructor: validates the channel/engine indices and descs_count, creates the
+// shared channel state, and allocates the host-side descriptor list. On any failure 'status'
+// is set to the failing code and construction stops early (callers must check 'status').
+ChannelBase::ChannelBase(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
+    uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, hailo_status &status) :
+    m_channel_id(channel_id),
+    m_direction(direction),
+    m_driver(driver),
+    m_host_registers(driver, channel_id, direction),
+    m_desc_list(nullptr),
+    m_stream_name(stream_name),
+    m_latency_meter(latency_meter)
+{
+    if (channel_id.channel_index >= VDMA_CHANNELS_PER_ENGINE) {
+        LOGGER__ERROR("Invalid DMA channel index {}", channel_id.channel_index);
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    if (channel_id.engine_index >= driver.dma_engines_count()) {
+        LOGGER__ERROR("Invalid DMA engine index {}, max {}", channel_id.engine_index, driver.dma_engines_count());
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    if (descs_count > MAX_DESCS_COUNT) {
+        LOGGER__ERROR("Vdma channel descs_count mustn't be larger than {}", MAX_DESCS_COUNT);
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    // Latency measurement requires extra state, hence the flag passed to create()
+    auto state = VdmaChannelState::create(descs_count, (nullptr != m_latency_meter));
+    if(!state) {
+        LOGGER__ERROR("Failed to create channel's state");
+        status = state.status();
+        return;
+    }
+    m_state = state.release();
+
+    // Allocate descriptor list (host side)
+    status = allocate_descriptor_list(descs_count, desc_page_size);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to allocate Vdma buffer for channel transfer! status={}", status);
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+hailo_status ChannelBase::set_num_avail_value(uint16_t new_value)
+{
+ // TODO - HRT-7885 : add check in driver
+ CHECK(m_state->m_is_channel_activated, HAILO_STREAM_NOT_ACTIVATED,
+ "Error, can't set num available when stream is not activated");
+
+ auto status = m_host_registers.set_num_available(new_value);
+ CHECK_SUCCESS(status, "Fail to write vdma num available register");
+
+#ifndef NDEBUG
+ // Validate synchronization with HW
+ auto hw_num_avail = m_host_registers.get_num_available();
+ assert(hw_num_avail);
+ assert(hw_num_avail.value() == new_value);
+#endif
+ return HAILO_SUCCESS;
+}
+
+hailo_status ChannelBase::inc_num_available(uint16_t value)
+{
+ //TODO: validate that count is added.
+ int num_available = get_num_available();
+ int num_processed = CB_TAIL(m_state->m_descs);
+ int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
+ if (value > num_free) {
+ return HAILO_OUT_OF_DESCRIPTORS;
+ }
+
+ CB_ENQUEUE(m_state->m_descs, value);
+ num_available = (num_available + value) & m_state->m_descs.size_mask;
+ return set_num_avail_value(static_cast<uint16_t>(num_available));
+}
+
+bool ChannelBase::is_desc_between(uint16_t begin, uint16_t end, uint16_t desc)
+{
+ if (begin == end) {
+ // There is nothing between
+ return false;
+ }
+ if (begin < end) {
+ // desc needs to be in [begin, end)
+ return (begin <= desc) && (desc < end);
+ }
+ else {
+ // desc needs to be in [0, end) or [begin, m_state->m_descs.size()-1]
+ return (desc < end) || (begin <= desc);
+ }
+}
+
+uint16_t ChannelBase::get_num_available()
+{
+ uint16_t num_available = (uint16_t)CB_HEAD(m_state->m_descs);
+
+#ifndef NDEBUG
+ // Validate synchronization with HW
+ auto hw_num_avail = m_host_registers.get_num_available();
+ assert(hw_num_avail);
+
+    // In case the channel is aborted, num_available is set to 0 (so we skip the sync validation)
+ auto is_aborted_exp = m_host_registers.is_aborted();
+ assert(is_aborted_exp);
+
+ if (m_state->m_is_channel_activated && !is_aborted_exp.value()) {
+ assert(hw_num_avail.value() == num_available);
+ }
+#endif
+ return num_available;
+}
+
+Expected<uint16_t> ChannelBase::get_hw_num_processed()
+{
+ auto hw_num_processed = m_host_registers.get_num_processed();
+ CHECK_EXPECTED(hw_num_processed, "Fail to read vdma num processed register");
+
+ // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if
+ // m_desc.size < 0x10000 (the maximum desc size), the actual hw_num_processed is a number
+ // between 1 and m_descs.size. Therefore the value can be m_descs.size, in this case we change it
+ // to zero.
+ return static_cast<uint16_t>(hw_num_processed.value() & m_state->m_descs.size_mask);
+}
+
+ChannelBase::Direction ChannelBase::other_direction(Direction direction)
+{
+ return (Direction::H2D == direction) ? Direction::D2H : Direction::H2D;
+}
+
+hailo_status ChannelBase::allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size)
+{
+ auto desc_page_size_value = m_driver.calc_desc_page_size(desc_page_size);
+ CHECK(is_powerof2(desc_page_size_value), HAILO_INVALID_ARGUMENT, "Descriptor page_size must be a power of two.");
+
+ auto desc_list_exp = DescriptorList::create(descs_count, desc_page_size_value, m_driver);
+ CHECK_EXPECTED_AS_STATUS(desc_list_exp);
+
+ m_desc_list = make_shared_nothrow<DescriptorList>(desc_list_exp.release());
+ CHECK_NOT_NULL(m_desc_list, HAILO_OUT_OF_HOST_MEMORY);
+
+ return HAILO_SUCCESS;
+}
+
+size_t ChannelBase::get_transfers_count_in_buffer(size_t transfer_size)
+{
+ const auto descs_in_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
+ const auto descs_count = CB_SIZE(m_state->m_descs);
+ return (descs_count - 1) / descs_in_transfer;
+}
+
+Expected<uint16_t> ChannelBase::update_latency_meter()
+{
+ uint16_t last_num_processed = m_state->m_last_timestamp_num_processed;
+
+ auto timestamp_list = m_driver.vdma_interrupts_read_timestamps(m_channel_id);
+ CHECK_EXPECTED(timestamp_list);
+
+ if (0 == timestamp_list->count) {
+ // No new timestamps for this channel, return the previous result
+ return Expected<uint16_t>(last_num_processed);
+ }
+
+ // TODO: now we have more iterations than we need. We know that the pending buffers + the timestamp list
+ // are ordered. If pending_buffer[i] is not in any of the timestamps_list[0, 1, ... k], then also pending_buffer[i+1,i+2,...]
+ // not in those timestamps
+
+ for (const auto &pending_buffer : m_state->m_pending_buffers) {
+ uint16_t latency_desc = static_cast<uint16_t>(pending_buffer.latency_measure_desc);
+ for (size_t i = 0; i < timestamp_list->count; i++) {
+ const auto &irq_timestamp = timestamp_list->timestamp_list[i];
+ const auto desc_num_processed = static_cast<uint16_t>(irq_timestamp.desc_num_processed & m_state->m_descs.size_mask);
+ if (is_desc_between(last_num_processed, desc_num_processed, latency_desc)) {
+ if (m_direction == Direction::H2D) {
+ m_latency_meter->add_start_sample(irq_timestamp.timestamp);
+ }
+ else {
+ m_latency_meter->add_end_sample(m_stream_name, irq_timestamp.timestamp);
+ }
+ break;
+ }
+ }
+ }
+
+ m_state->m_last_timestamp_num_processed = static_cast<uint16_t>(
+ timestamp_list->timestamp_list[timestamp_list->count-1].desc_num_processed & m_state->m_descs.size_mask);
+ return Expected<uint16_t>(m_state->m_last_timestamp_num_processed);
+}
+
+uint32_t ChannelBase::calculate_descriptors_count(uint32_t buffer_size) const
+{
+ return DescriptorList::calculate_descriptors_count(buffer_size, 1, m_desc_list->desc_page_size());
+}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file channel_base.hpp
+ * @brief Base class of Boundary Channel - responsible for all the basic vdma channel functionality that interacts with the
+ * driver and the registers
+ * The hierarchy is as follows:
+ * --------------------------------------------------------------------------------------------------------------
+ * | ChannelBase | (Base class - includes state and buffers)
+ * | | |
+ * | BoundaryChannel | (handles Boundary channels)
+ * --------------------------------------------------------------------------------------------------------------
+ **/
+
+#ifndef _HAILO_VDMA_CHANNEL_BASE_HPP_
+#define _HAILO_VDMA_CHANNEL_BASE_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/buffer.hpp"
+
+#include "common/latency_meter.hpp"
+
+#include "vdma/channel/vdma_channel_regs.hpp"
+#include "vdma/memory/sg_buffer.hpp"
+#include "vdma/memory/descriptor_list.hpp"
+#include "vdma/channel/channel_id.hpp"
+#include "vdma/channel/channel_state.hpp"
+
+#include <mutex>
+#include <condition_variable>
+
+
+namespace hailort {
+namespace vdma {
+
+class ChannelBase
+{
+public:
+ using Direction = HailoRTDriver::DmaDirection;
+
+ ChannelBase(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
+ uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, hailo_status &status);
+ ChannelBase(const ChannelBase &other) = delete;
+ ChannelBase &operator=(const ChannelBase &other) = delete;
+ ChannelBase(ChannelBase &&other) = delete;
+ ChannelBase &operator=(ChannelBase &&other) = delete;
+ virtual ~ChannelBase() = default;
+
+ vdma::ChannelId get_channel_id() const
+ {
+ return m_channel_id;
+ }
+
+ uint16_t get_page_size()
+ {
+ return m_desc_list->desc_page_size();
+ }
+
+ const std::string &stream_name() const
+ {
+ return m_stream_name;
+ }
+
+ size_t get_transfers_count_in_buffer(size_t transfer_size);
+ size_t get_buffer_size() const;
+ uint32_t calculate_descriptors_count(uint32_t buffer_size) const;
+
+ std::shared_ptr<DescriptorList> get_desc_list()
+ {
+ return m_desc_list;
+ }
+
+protected:
+ const vdma::ChannelId m_channel_id;
+ const Direction m_direction;
+ HailoRTDriver &m_driver;
+ VdmaChannelRegs m_host_registers;
+ std::shared_ptr<DescriptorList> m_desc_list; // Host side descriptor list
+ const std::string m_stream_name;
+ std::unique_ptr<VdmaChannelState> m_state;
+ LatencyMeterPtr m_latency_meter;
+
+ static bool is_desc_between(uint16_t begin, uint16_t end, uint16_t desc);
+ // Returns the desc index of the last desc whose timestamp was measured in the driver
+ Expected<uint16_t> update_latency_meter();
+ Expected<bool> is_aborted();
+ hailo_status set_num_avail_value(uint16_t new_value);
+ uint16_t get_num_available();
+ Expected<uint16_t> get_hw_num_processed();
+ hailo_status inc_num_available(uint16_t value);
+ static Direction other_direction(const Direction direction);
+
+private:
+ hailo_status allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size);
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_CHANNEL_BASE_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file channel_id.hpp
+ * @brief Struct used for channel identifier - (engine_index, channel_index) pair.
+ **/
+
+#ifndef _HAILO_VDMA_CHANNEL_ID_HPP_
+#define _HAILO_VDMA_CHANNEL_ID_HPP_
+
+#include "hailo/hailort.h"
+#include "common/logger_macros.hpp"
+#include <spdlog/fmt/bundled/format.h>
+#include <sstream>
+
+
+namespace hailort {
+namespace vdma {
+
+// TODO: HRT-6949 don't use default engine index.
+static constexpr uint8_t DEFAULT_ENGINE_INDEX = 0;
+
+/**
+ * For each dma engine we have 16 inputs channels and 16 output channels.
+ * The amount of engines is determined by the driver.
+ */
+struct ChannelId
+{
+ uint8_t engine_index;
+ uint8_t channel_index;
+
+ // Allow put `ChannelId` inside std::set
+ friend bool operator<(const ChannelId &a, const ChannelId &b)
+ {
+ return std::make_pair(a.engine_index, a.channel_index) < std::make_pair(b.engine_index, b.channel_index);
+ }
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+
+template<>
+struct fmt::formatter<hailort::vdma::ChannelId> : fmt::formatter<fmt::string_view> {
+ template <typename FormatContext>
+ auto format(const hailort::vdma::ChannelId &input, FormatContext& ctx) -> decltype(ctx.out()) {
+ std::stringstream ss;
+ ss << static_cast<uint32_t>(input.engine_index) << ":"
+ << static_cast<uint32_t>(input.channel_index);
+ return fmt::formatter<fmt::string_view>::format(ss.str(), ctx);
+ }
+};
+
+#endif /* _HAILO_VDMA_CHANNEL_ID_HPP_ */
--- /dev/null
+#include "vdma/channel/channel_state.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+#ifndef _MSC_VER
+RecursiveSharedMutex::RecursiveSharedMutex()
+{
+ // Make sharable mutex
+ pthread_mutexattr_t mutex_attrs{};
+ int err = pthread_mutexattr_init(&mutex_attrs);
+ if (0 != err) {
+ LOGGER__CRITICAL("Failed init mutex attr, aborting");
+ std::abort();
+ }
+
+ err = pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED);
+ if (0 != err) {
+ LOGGER__CRITICAL("pthread_mutexattr_setpshared failed");
+ std::abort();
+ }
+
+ err = pthread_mutexattr_settype(&mutex_attrs, PTHREAD_MUTEX_RECURSIVE);
+ if (0 != err) {
+ LOGGER__CRITICAL("pthread_mutexattr_settype failed");
+ std::abort();
+ }
+
+ err = pthread_mutex_init(&m_mutex, &mutex_attrs);
+ if (0 != pthread_mutexattr_destroy(&mutex_attrs)) {
+ LOGGER__CRITICAL("Failed destroy mutexattr");
+ // continue
+ }
+ if (0 != err) {
+ LOGGER__CRITICAL("Failed init mutex, aborting");
+ std::abort();
+ }
+}
+
+RecursiveSharedMutex::~RecursiveSharedMutex()
+{
+ int err = pthread_mutex_destroy(&m_mutex);
+ if (0 != err) {
+ LOGGER__ERROR("Failed destroy shared mutex, errno {}", err);
+ }
+}
+
+void RecursiveSharedMutex::lock()
+{
+ int err = pthread_mutex_lock(&m_mutex);
+ if (0 != err) {
+ LOGGER__ERROR("Failed lock shared mutex, errno {}", err);
+ std::abort();
+ }
+}
+
+void RecursiveSharedMutex::unlock()
+{
+ int err = pthread_mutex_unlock(&m_mutex);
+ if (0 != err) {
+ LOGGER__ERROR("Failed unlock shared mutex, errno {}", err);
+ std::abort();
+ }
+}
+
+// Construct a process-shared (fork-safe) condition variable. Timed waits use
+// CLOCK_MONOTONIC so they are immune to wall-clock adjustments. Any failure
+// here is unrecoverable, hence std::abort().
+SharedConditionVariable::SharedConditionVariable()
+{
+    // Make sharable condvar
+    pthread_condattr_t cond_attrs{};
+    int err = pthread_condattr_init(&cond_attrs);
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init condition variable attr, aborting");
+        std::abort();
+    }
+
+    err = pthread_condattr_setpshared(&cond_attrs, PTHREAD_PROCESS_SHARED);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_condattr_setpshared failed");
+        std::abort();
+    }
+
+    err = pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_condattr_setclock failed");
+        std::abort();
+    }
+
+    err = pthread_cond_init(&m_cond, &cond_attrs);
+    if (0 != pthread_condattr_destroy(&cond_attrs)) {
+        LOGGER__CRITICAL("Failed destroy condattr");
+        // continue
+    }
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init condition variable, aborting");
+        std::abort();
+    }
+}
+
+// Destroy the shared condition variable; destruction failure is logged but not
+// fatal (destructors must not throw or abort on cleanup-path errors).
+SharedConditionVariable::~SharedConditionVariable()
+{
+    int err = pthread_cond_destroy(&m_cond);
+    if (0 != err) {
+        LOGGER__ERROR("Failed destroy vdma channel condition variable, errno {}", err);
+    }
+}
+
+// Get the absolute time for the given timeout - calculate now() + timeout_ns
+// using system CLOCK_MONOTONIC (Used for pthread condition variable wait)
+static struct timespec get_absolute_time(std::chrono::nanoseconds timeout_ns)
+{
+    // Using chrono with timespec types to avoid casts
+    using ts_seconds = std::chrono::duration<decltype(timespec::tv_sec)>;
+    using ts_nanoseconds = std::chrono::duration<decltype(timespec::tv_nsec), std::nano>;
+
+    struct timespec current_ts{};
+    clock_gettime(CLOCK_MONOTONIC, &current_ts);
+
+    assert((current_ts.tv_sec + std::chrono::duration_cast<ts_seconds>(timeout_ns).count()) <
+        std::numeric_limits<decltype(timespec::tv_sec)>::max());
+    auto absolute_sec = ts_seconds(current_ts.tv_sec) + std::chrono::duration_cast<ts_seconds>(timeout_ns);
+    assert(current_ts.tv_nsec <= std::nano::den);
+    auto absolute_nsec = ts_nanoseconds(current_ts.tv_nsec) +
+        std::chrono::duration_cast<ts_nanoseconds>(timeout_ns % std::chrono::seconds(1));
+
+    // Carry nanoseconds overflow into the seconds field so tv_nsec stays in [0, 1e9)
+    if (absolute_nsec.count() >= std::nano::den) {
+        absolute_sec += ts_seconds(1);
+        absolute_nsec = absolute_nsec % ts_seconds(1);
+    }
+
+    return timespec {
+        .tv_sec = absolute_sec.count(),
+        .tv_nsec = absolute_nsec.count()
+    };
+}
+
+bool SharedConditionVariable::wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout, std::function<bool()> condition)
+{
+ if (UINT32_MAX == timeout.count()) {
+ // Infinity wait
+ int err = 0;
+ while (!condition() && err == 0) {
+ err = pthread_cond_wait(&m_cond, lock.mutex()->native_handle());
+ }
+ if (err != 0) {
+ LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
+ std::abort();
+ }
+ return true;
+ }
+ else if (0 == timeout.count()) {
+ // Special case for 0 timeout - we don't want to mess with absolute time
+ return condition();
+ } else {
+ // Timed wait
+ auto ts = get_absolute_time(timeout);
+
+ int err = 0;
+ while (!condition() && err == 0) {
+ err = pthread_cond_timedwait(&m_cond, lock.mutex()->native_handle(), &ts);
+ }
+ if ((err != 0) && (err != ETIMEDOUT)) {
+ LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
+ std::abort();
+ }
+ return err == 0;
+ }
+}
+
+void SharedConditionVariable::notify_one()
+{
+ pthread_cond_signal(&m_cond);
+}
+
+void SharedConditionVariable::notify_all()
+{
+ pthread_cond_broadcast(&m_cond);
+}
+
+#endif /* _MSC_VER */
+
+Expected<std::unique_ptr<VdmaChannelState>> VdmaChannelState::create(uint32_t descs_count, bool measure_latency)
+{
+ // Note: we implement operator new so the state object will be shared with forked processes.
+ auto state = make_unique_nothrow<VdmaChannelState>(descs_count, measure_latency);
+ CHECK_NOT_NULL_AS_EXPECTED(state, HAILO_OUT_OF_HOST_MEMORY);
+ return state;
+}
+
+VdmaChannelState::VdmaChannelState(uint32_t descs_count, bool measure_latency) :
+ m_is_channel_activated(false),
+    // If we are measuring latency, we may get 2 interrupts for each input channel (first descriptor and last descriptor).
+ // Hence we must limit the transfers count to half of the actual transfers count.
+ m_pending_buffers(measure_latency ? PENDING_BUFFERS_SIZE/2 : PENDING_BUFFERS_SIZE),
+ m_d2h_read_desc_index(0),
+ m_d2h_read_desc_index_abs(0),
+ m_is_aborted(false),
+ m_previous_tail(0),
+ m_desc_list_delta(0),
+ m_last_timestamp_num_processed(0),
+ m_accumulated_transfers(0)
+{
+ CB_INIT(m_descs, descs_count);
+}
+
+void VdmaChannelState::reset_counters()
+{
+ CB_RESET(m_descs);
+ m_pending_buffers.reset();
+ m_last_timestamp_num_processed = 0;
+ m_accumulated_transfers = 0;
+}
+
+void VdmaChannelState::reset_previous_state_counters()
+{
+ m_previous_tail = 0;
+ m_desc_list_delta = 0;
+ m_d2h_read_desc_index = 0;
+ m_d2h_read_desc_index_abs = 0;
+}
+
+void VdmaChannelState::add_pending_buffer(uint32_t first_desc, uint32_t last_desc, HailoRTDriver::DmaDirection direction,
+ const TransferDoneCallback &on_transfer_done, std::shared_ptr<DmaMappedBuffer> buffer, void *opaque)
+{
+ if (m_pending_buffers.full()) {
+ // TODO- HRT-8900 : Fix log and check if should return error
+ LOGGER__ERROR("no avail space");
+ }
+ PendingBuffer pending_buffer{};
+ pending_buffer.last_desc = last_desc;
+ pending_buffer.latency_measure_desc = (direction == HailoRTDriver::DmaDirection::H2D) ? first_desc : last_desc;
+ pending_buffer.on_transfer_done = on_transfer_done;
+ pending_buffer.buffer = buffer;
+ pending_buffer.opaque = opaque;
+ m_pending_buffers.push_back(std::move(pending_buffer));
+}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file channel_state.hpp
+ * @brief Current state of Vdma Channel
+ *
+ * Holds the fork-shared synchronization primitives and the bookkeeping counters (descriptors, pending buffers) of a vdma channel.
+ **/
+
+#ifndef _HAILO_VDMA_CHANNEL_STATE_HPP_
+#define _HAILO_VDMA_CHANNEL_STATE_HPP_
+
+#include "hailo/hailort.h"
+#include "os/hailort_driver.hpp"
+#include "common/circular_buffer.hpp"
+#include "hailo/dma_mapped_buffer.hpp"
+#include "hailo/stream.hpp"
+
+#include <array>
+#include <condition_variable>
+
+#ifndef _MSC_VER
+#include <sys/mman.h>
+#endif
+
+
+namespace hailort {
+namespace vdma {
+
+struct PendingBuffer {
+ uint32_t last_desc;
+ uint32_t latency_measure_desc;
+ TransferDoneCallback on_transfer_done;
+ std::shared_ptr<DmaMappedBuffer> buffer;
+ void *opaque;
+};
+
+class ChannelBase;
+class BoundaryChannel;
+class AsyncChannel;
+class BufferedChannel;
+
+
+#ifndef _MSC_VER
+// Special mutex and condition variable objects that can be shared between forked processes (Not needed on windows,
+// because there is no fork).
+class RecursiveSharedMutex final {
+public:
+ RecursiveSharedMutex();
+ ~RecursiveSharedMutex();
+
+ RecursiveSharedMutex(const RecursiveSharedMutex &) = delete;
+ RecursiveSharedMutex &operator=(const RecursiveSharedMutex &) = delete;
+ RecursiveSharedMutex(RecursiveSharedMutex &&) = delete;
+ RecursiveSharedMutex &operator=(RecursiveSharedMutex &&) = delete;
+
+ void lock();
+ void unlock();
+
+ pthread_mutex_t *native_handle()
+ {
+ return &m_mutex;
+ }
+
+private:
+ pthread_mutex_t m_mutex;
+};
+
+class SharedConditionVariable final {
+public:
+
+ SharedConditionVariable();
+ ~SharedConditionVariable();
+
+ SharedConditionVariable(const SharedConditionVariable &) = delete;
+ SharedConditionVariable &operator=(const SharedConditionVariable &) = delete;
+ SharedConditionVariable(SharedConditionVariable &&) = delete;
+ SharedConditionVariable &operator=(SharedConditionVariable &&) = delete;
+
+ bool wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout, std::function<bool()> condition);
+ void notify_one();
+ void notify_all();
+
+private:
+ pthread_cond_t m_cond;
+};
+#else /* _MSC_VER */
+using RecursiveSharedMutex = std::recursive_mutex;
+using SharedConditionVariable = std::condition_variable_any;
+#endif
+
+class VdmaChannelState final
+{
+public:
+ static Expected<std::unique_ptr<VdmaChannelState>> create(uint32_t descs_count, bool measure_latency);
+
+ VdmaChannelState(uint32_t descs_count, bool measure_latency);
+ VdmaChannelState(const VdmaChannelState &other) = delete;
+ VdmaChannelState(VdmaChannelState &&other) = delete;
+ ~VdmaChannelState() = default;
+
+ void reset_counters();
+ void reset_previous_state_counters();
+ // Each transfer on the channel is logged by a PendingBuffer:
+ // - first_desc/last_desc - first and last descriptors of the transfer
+ // - direction - transfer's direction
+ // - on_transfer_done - callback to be called once the transfer is complete (i.e. when an interrupt is received on last_desc)
+ // - buffer - points to the vdma mapped buffer being transferred (may be null)
+ // - opaque - context to be transferred to the callback (may be null)
+ void add_pending_buffer(uint32_t first_desc, uint32_t last_desc, HailoRTDriver::DmaDirection direction,
+ const TransferDoneCallback &on_transfer_done, std::shared_ptr<DmaMappedBuffer> buffer = nullptr, void *opaque = nullptr);
+
+ RecursiveSharedMutex &mutex()
+ {
+ return m_state_lock;
+ }
+
+ SharedConditionVariable &transfer_buffer_cv()
+ {
+ return m_can_transfer_buffer_cv;
+ }
+
+#ifndef _MSC_VER
+ // The VdmaChannelState must remain in a shared memory scope, so we implement the new/delete operators (only on
+ // non-windows machines).
+ void* operator new(std::size_t size) = delete;
+ void* operator new(std::size_t size, const std::nothrow_t&) throw() {
+ // Map a shared memory region into the virtual memory of the process
+ void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (ptr == MAP_FAILED) {
+ return nullptr;
+ }
+ return ptr;
+ }
+
+ // Custom operator delete function that unmaps the shared memory region
+ void operator delete(void* ptr, std::size_t size) {
+ munmap(ptr, size);
+ }
+#endif /* _MSC_VER */
+
+ friend class ChannelBase;
+ friend class BoundaryChannel;
+ friend class AsyncChannel;
+ friend class BufferedChannel;
+
+private:
+ RecursiveSharedMutex m_state_lock;
+ SharedConditionVariable m_can_transfer_buffer_cv;
+
+ bool m_is_channel_activated;
+
+    // For the pending buffers we must use std::array because this state relies on shared memory (std::vector allocates with new/malloc)
+ CircularArray<PendingBuffer, std::array<PendingBuffer, PENDING_BUFFERS_SIZE>> m_pending_buffers;
+ // TODO: describe why we must have our own num_available and num_proc.
+ // it's not just for efficiency but its critical to avoid a potential bug - see Avigail email.
+ // TODO: Consider C11 stdatomic
+ circbuf_t m_descs;
+ // m_d2h_read_desc_index and m_d2h_read_desc_index_abs are the index of the first desc containing frames to be
+ // copied to the user ("ready" frames in a D2H buffered channel). m_d2h_read_desc_index is relative to the
+ // first desc in the desc list, whereas m_d2h_read_desc_index_abs is relative to the start of the vdma buffer.
+ int m_d2h_read_desc_index;
+ int m_d2h_read_desc_index_abs;
+ bool m_is_aborted;
+ // Points to the tail of the desc list when the channel is stopped (starts at zero)
+ int m_previous_tail;
+ int m_desc_list_delta;
+ // Contains the last num_processed of the last interrupt (only used on latency measurement)
+ uint16_t m_last_timestamp_num_processed;
+ size_t m_accumulated_transfers;
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_CHANNEL_STATE_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file interrupts_dispatcher.cpp
+ * @brief Manages a thread that is waiting for channel interrupts.
+ **/
+
+#include "interrupts_dispatcher.hpp"
+#include "hailo/hailort_common.hpp"
+#include "common/os_utils.hpp"
+
+namespace hailort {
+namespace vdma {
+
+Expected<std::unique_ptr<InterruptsDispatcher>> InterruptsDispatcher::create(std::reference_wrapper<HailoRTDriver> driver)
+{
+ auto thread = make_unique_nothrow<InterruptsDispatcher>(driver);
+ CHECK_NOT_NULL_AS_EXPECTED(thread, HAILO_OUT_OF_HOST_MEMORY);
+ return thread;
+}
+
+InterruptsDispatcher::InterruptsDispatcher(std::reference_wrapper<HailoRTDriver> driver) :
+ m_driver(driver),
+ m_is_running(false),
+ m_channels_bitmap()
+{}
+
+InterruptsDispatcher::~InterruptsDispatcher()
+{
+ if (m_is_running) {
+ stop();
+ }
+}
+
+hailo_status InterruptsDispatcher::start(const ChannelsBitmap &channels_bitmap, bool enable_timestamp_measure,
+ const ProcessIrqCallback &process_irq)
+{
+ CHECK(!m_is_running, HAILO_INVALID_OPERATION, "Interrupt thread already running");
+ assert(m_channel_threads.empty());
+ assert(m_channels_bitmap == ChannelsBitmap{});
+
+ m_channels_bitmap = channels_bitmap;
+
+ auto status = m_driver.get().vdma_interrupts_enable(m_channels_bitmap, enable_timestamp_measure);
+ CHECK_SUCCESS(status, "Failed to enable vdma interrupts");
+
+ // Setting m_is_running will allow the threads to run
+ m_is_running = true;
+ m_channel_threads.emplace_back([this, process_irq]() {
+ // m_channels_bitmap may be changed by InterruptsDispatcher::stop. To avoid wait for 0 channels,
+ // we use copy of m_channels_bitmap.
+ ChannelsBitmap channels_bitmap_local = m_channels_bitmap;
+ wait_interrupts(channels_bitmap_local, process_irq);
+ });
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status InterruptsDispatcher::stop()
+{
+ CHECK(m_is_running, HAILO_INVALID_OPERATION, "Interrupts thread not started");
+ assert(!m_channel_threads.empty());
+ assert(m_channels_bitmap != ChannelsBitmap{});
+
+ // Signal threads to stop execution
+ m_is_running = false;
+
+ // Calling disable interrupts will cause the vdma_interrupts_wait to return.
+ auto status = m_driver.get().vdma_interrupts_disable(m_channels_bitmap);
+ CHECK_SUCCESS(status, "Failed to disable vdma interrupts");
+
+ m_channels_bitmap = ChannelsBitmap{};
+ for (auto &thread : m_channel_threads) {
+ if (thread.joinable()) {
+ thread.join();
+ }
+ }
+ m_channel_threads.clear();
+
+ return HAILO_SUCCESS;
+}
+
+void InterruptsDispatcher::wait_interrupts(const ChannelsBitmap &channels_bitmap, const ProcessIrqCallback &process_irq)
+{
+ OsUtils::set_current_thread_name("CHANNEL_INTR");
+ while (m_is_running) {
+        // vdma_interrupts_wait is a blocking function that returns in these scenarios:
+        //   1. We got new interrupts; irq_data will be passed to the process_irq callback.
+        //   2. vdma_interrupts_disable is called, and vdma_interrupts_wait returns with an empty list.
+        //   3. Any other error - shouldn't really happen; we exit the interrupt thread.
+ auto irq_data = m_driver.get().vdma_interrupts_wait(channels_bitmap);
+ if (!irq_data.has_value()) {
+ LOGGER__ERROR("Interrupt thread exit with {}", irq_data.status());
+ break;
+ }
+
+ if (irq_data->channels_count > 0) {
+ process_irq(irq_data.release());
+ }
+ }
+}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file interrupts_dispatcher.hpp
+ * @brief Manages a thread that is waiting for channel interrupts.
+ **/
+
+#ifndef _HAILO_VDMA_INTERRUPTS_DISPATCHER_HPP_
+#define _HAILO_VDMA_INTERRUPTS_DISPATCHER_HPP_
+
+#include "os/hailort_driver.hpp"
+#include <thread>
+#include <functional>
+
+namespace hailort {
+namespace vdma {
+
+
+/// When needed, creates thread (or threads) that waits for interrupts on all channels.
+class InterruptsDispatcher final {
+public:
+ // The actual irq process callback, should run quickly (blocks the interrupts thread).
+ using ProcessIrqCallback = std::function<void(IrqData &&irq_data)>;
+
+ static Expected<std::unique_ptr<InterruptsDispatcher>> create(std::reference_wrapper<HailoRTDriver> driver);
+ explicit InterruptsDispatcher(std::reference_wrapper<HailoRTDriver> driver);
+ ~InterruptsDispatcher();
+
+ InterruptsDispatcher(const InterruptsDispatcher &) = delete;
+ InterruptsDispatcher &operator=(const InterruptsDispatcher &) = delete;
+ InterruptsDispatcher(InterruptsDispatcher &&) = delete;
+ InterruptsDispatcher &operator=(InterruptsDispatcher &&) = delete;
+
+ // TODO: HRT-9590 remove interrupt_thread_per_channel, use it by default
+ hailo_status start(const ChannelsBitmap &channels_bitmap, bool enable_timestamp_measure,
+ const ProcessIrqCallback &process_irq);
+ hailo_status stop();
+
+private:
+
+ void wait_interrupts(const ChannelsBitmap &channels_bitmap, const ProcessIrqCallback &process_irq);
+
+ const std::reference_wrapper<HailoRTDriver> m_driver;
+ std::atomic<bool> m_is_running;
+ ChannelsBitmap m_channels_bitmap;
+ std::vector<std::thread> m_channel_threads;
+};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_INTERRUPTS_DISPATCHER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_channel_regs.hpp
+ * @brief utilities used to parse/modify PLDA Vdma channel registers
+ **/
+
+#ifndef _HAILO_VDMA_CHANNEL__REGS_HPP_
+#define _HAILO_VDMA_CHANNEL__REGS_HPP_
+
+#include "hw_consts.hpp"
+#include "hailo/expected.hpp"
+#include "os/hailort_driver.hpp"
+
+#include <cstdint>
+
+namespace hailort
+{
+
+#define DESCPRIPTOR_LIST_MAX_DEPTH (16)
+
+
+inline bool vdma_channel_control_is_aborted(uint8_t control_reg)
+{
+ return (control_reg & 1) == 0;
+}
+
+
+class VdmaChannelRegs final {
+public:
+ VdmaChannelRegs(HailoRTDriver &driver, vdma::ChannelId channel_id, HailoRTDriver::DmaDirection direction) :
+ m_driver(driver),
+ m_channel_id(channel_id),
+ m_direction(direction)
+ {}
+
+ Expected<uint16_t> get_num_available()
+ {
+ return read_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET);
+ }
+
+ hailo_status set_num_available(uint16_t value)
+ {
+ return write_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET, value);
+ }
+
+ Expected<uint16_t> get_num_processed()
+ {
+ return read_integer<uint16_t>(VDMA_CHANNEL_NUM_PROC_OFFSET);
+ }
+
+#ifndef NDEBUG
+ Expected<bool> is_aborted()
+ {
+ const auto control_reg = read_integer<uint8_t>(VDMA_CHANNEL_CONTROL_OFFSET);
+ CHECK_EXPECTED(control_reg);
+ return vdma_channel_control_is_aborted(*control_reg);
+ }
+#endif /* NDEBUG */
+
+private:
+
+ template<typename IntegerType>
+ Expected<IntegerType> read_integer(uint32_t offset)
+ {
+ auto value = m_driver.read_vdma_channel_register(m_channel_id, m_direction, offset, sizeof(IntegerType));
+ CHECK_EXPECTED(value);
+ return static_cast<IntegerType>(value.release());
+ }
+
+ template<typename IntegerType>
+ hailo_status write_integer(uint32_t offset, IntegerType value)
+ {
+ return m_driver.write_vdma_channel_register(m_channel_id, m_direction, offset, sizeof(value), value);
+ }
+
+ HailoRTDriver &m_driver;
+ const vdma::ChannelId m_channel_id;
+ const HailoRTDriver::DmaDirection m_direction;
+};
+
+} /* namespace hailort */
+
+#endif /*_HAILO_VDMA_CHANNEL__REGS_HPP_ */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file channel_index.hpp
- * @brief Struct used for channel identifier - (engine_index, channel_index) pair.
- **/
-
-#ifndef _HAILO_VDMA_CHANNEL_ID_HPP_
-#define _HAILO_VDMA_CHANNEL_ID_HPP_
-
-#include "hailo/hailort.h"
-#include "common/logger_macros.hpp"
-#include <spdlog/fmt/bundled/format.h>
-#include <sstream>
-
-
-namespace hailort {
-namespace vdma {
-
-// TODO: HRT-6949 don't use default engine index.
-static constexpr uint8_t DEFAULT_ENGINE_INDEX = 0;
-
-/**
- * For each dma engine we have 16 inputs channels and 16 output channels.
- * The amount of engines is determined by the driver.
- */
-struct ChannelId
-{
- uint8_t engine_index;
- uint8_t channel_index;
-
- // Allow put `ChannelId` inside std::set
- friend bool operator<(const ChannelId &a, const ChannelId &b)
- {
- return std::make_pair(a.engine_index, a.channel_index) < std::make_pair(b.engine_index, b.channel_index);
- }
-};
-
-} /* namespace vdma */
-} /* namespace hailort */
-
-
-template<>
-struct fmt::formatter<hailort::vdma::ChannelId> : fmt::formatter<fmt::string_view> {
- template <typename FormatContext>
- auto format(const hailort::vdma::ChannelId &input, FormatContext& ctx) -> decltype(ctx.out()) {
- std::stringstream ss;
- ss << static_cast<uint32_t>(input.engine_index) << ":"
- << static_cast<uint32_t>(input.channel_index);
- return fmt::formatter<fmt::string_view>::format(ss.str(), ctx);
- }
-};
-
-#endif /* _HAILO_VDMA_CHANNEL_ID_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file continuous_buffer.hpp
- * @brief Continuous physical vdma buffer.
- **/
-
-#include "continuous_buffer.hpp"
-
-namespace hailort {
-namespace vdma {
-
-// Minimum size of ccb buffers in descriptors, taken from the CCB spec.
-#define MIN_CCB_DESCRIPTORS_COUNT (16)
-
-static uint32_t align(uint32_t size, uint32_t align)
-{
- assert(is_powerof2(align));
- const uint32_t mask = align - 1;
- return (size + mask) & ~mask;
-}
-
-Expected<ContinuousBuffer> ContinuousBuffer::create(size_t size, HailoRTDriver &driver)
-{
- auto result = driver.vdma_continuous_buffer_alloc(size);
- CHECK_EXPECTED(result, "Failed allocating continuous buffer, size {}", size);
-
- uintptr_t handle = 0;
- uint64_t dma_address = 0;
- std::tie(handle, dma_address) = result.release();
-
- auto mmap = MmapBuffer<void>::create_file_map(size, driver.fd(), handle);
- if (!mmap) {
- LOGGER__ERROR("Failed mmap continuous buffer");
- driver.vdma_continuous_buffer_free(handle);
- return make_unexpected(mmap.status());
- }
-
- return ContinuousBuffer(size, driver, handle, dma_address, mmap.release());
-}
-
-uint32_t ContinuousBuffer::get_buffer_size(uint32_t buffer_size)
-{
- const uint16_t page_size = DEFAULT_DESC_PAGE_SIZE;
- const auto aligned_buffer_size = align(buffer_size, page_size);
-
- const uint32_t min_buffer_size = page_size * MIN_CCB_DESCRIPTORS_COUNT;
- return std::max(aligned_buffer_size, min_buffer_size);
-}
-
-uint32_t ContinuousBuffer::get_buffer_size_desc_power2(uint32_t buffer_size)
-{
- const uint16_t page_size = DEFAULT_DESC_PAGE_SIZE;
- const auto descriptors_in_buffer = DESCRIPTORS_IN_BUFFER(buffer_size, page_size);
- const auto actual_descriptors_count = get_nearest_powerof_2(descriptors_in_buffer, MIN_CCB_DESCRIPTORS_COUNT);
- return actual_descriptors_count * page_size;
-}
-
-ContinuousBuffer::~ContinuousBuffer()
-{
- if (0 != m_handle) {
- auto status = m_mmap.unmap();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed unmap mmap buffer {}", status);
- }
-
- status = m_driver.vdma_continuous_buffer_free(m_handle);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed free continuous buffer, {}", status);
- }
-
- m_handle = 0;
- }
-}
-
-size_t ContinuousBuffer::size() const
-{
- return m_size;
-}
-
-uint64_t ContinuousBuffer::dma_address() const
-{
- return m_dma_address;
-}
-
-uint16_t ContinuousBuffer::desc_page_size() const
-{
- // Currently we support only the default desc page size, TODO: HRT-5381 support more desc page size?
- return DEFAULT_DESC_PAGE_SIZE;
-}
-
-uint32_t ContinuousBuffer::descs_count() const
-{
- return descriptors_in_buffer(m_size);
-}
-
-hailo_status ContinuousBuffer::read(void *buf_dst, size_t count, size_t offset, bool /* should_sync */)
-{
- CHECK((count + offset) <= m_size, HAILO_INSUFFICIENT_BUFFER,
- "Requested size {} from offset {} is more than the buffer size {}", count, offset, m_size);
- // We use dma coherent mmap, so no need to sync the buffer after the memcpy.
- const auto src_address = reinterpret_cast<uint8_t*>(m_mmap.get()) + offset;
- memcpy(buf_dst, src_address, count);
- return HAILO_SUCCESS;
-}
-
-hailo_status ContinuousBuffer::write(const void *buf_src, size_t count, size_t offset)
-{
- CHECK((count + offset) <= m_size, HAILO_INSUFFICIENT_BUFFER,
- "Requested size {} from offset {} is more than the buffer size {}", count, offset, m_size);
- // We use dma coherent mmap, so no need to sync the buffer after the memcpy.
- const auto dst_address = reinterpret_cast<uint8_t*>(m_mmap.get()) + offset;
- memcpy(dst_address, buf_src, count);
- return HAILO_SUCCESS;
-}
-
-Expected<uint32_t> ContinuousBuffer::program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular)
-{
- (void)first_desc_interrupts_domain;
- (void)last_desc_interrupts_domain;
- (void)desc_offset;
- (void)is_circular;
-
- // The descriptors in continuous mode are programmed by the hw, nothing to do here.
- return descriptors_in_buffer(transfer_size);
-}
-
-hailo_status ContinuousBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
- VdmaInterruptsDomain new_interrupts_domain)
-{
- (void)transfer_size;
- (void)batch_size;
- (void)new_interrupts_domain;
-
- // The descriptors in continuous mode are programmed by the hw, nothing to do here.
- return HAILO_SUCCESS;
-}
-
-ContinuousBuffer::ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
- MmapBuffer<void> &&mmap) :
- m_size(size),
- m_driver(driver),
- m_handle(handle),
- m_dma_address(dma_address),
- m_mmap(std::move(mmap))
-{}
-
-}; /* namespace vdma */
-}; /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file continuous_buffer.hpp
- * @brief Continuous physical vdma buffer.
- **/
-
-#ifndef _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_
-#define _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_
-
-#include "os/hailort_driver.hpp"
-#include "os/mmap_buffer.hpp"
-#include "vdma/vdma_buffer.hpp"
-
-namespace hailort {
-namespace vdma {
-
-class ContinuousBuffer final : public VdmaBuffer {
-public:
- static Expected<ContinuousBuffer> create(size_t size, HailoRTDriver &driver);
-
- static uint32_t get_buffer_size(uint32_t buffer_size);
- // Get buffer size with the requirment that the amount of descriptors is a power of 2.
- static uint32_t get_buffer_size_desc_power2(uint32_t buffer_size);
-
- ContinuousBuffer(const ContinuousBuffer &) = delete;
- ContinuousBuffer& operator=(const ContinuousBuffer &) = delete;
- ContinuousBuffer& operator=(ContinuousBuffer &&) = delete;
-
- virtual ~ContinuousBuffer();
-
- ContinuousBuffer(ContinuousBuffer &&other) noexcept :
- VdmaBuffer(std::move(other)),
- m_size(other.m_size),
- m_driver(other.m_driver),
- m_handle(std::exchange(other.m_handle, 0)),
- m_dma_address(std::exchange(other.m_dma_address, 0)),
- m_mmap(std::move(other.m_mmap))
- {}
-
- virtual Type type() const override
- {
- return Type::CONTINUOUS;
- }
-
- virtual size_t size() const override;
- virtual uint64_t dma_address() const override;
- virtual uint16_t desc_page_size() const override;
- virtual uint32_t descs_count() const override;
-
- virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync) override;
- virtual hailo_status write(const void *buf_src, size_t count, size_t offset) override;
-
- virtual Expected<uint32_t> program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular) override;
- virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
- VdmaInterruptsDomain new_interrupts_domain) override;
-
-private:
- ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
- MmapBuffer<void> &&mmap);
-
- const size_t m_size;
- HailoRTDriver &m_driver;
- uintptr_t m_handle;
- uint64_t m_dma_address;
- MmapBuffer<void> m_mmap;
-};
-
-}; /* namespace vdma */
-}; /* namespace hailort */
-
-#endif /* _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_ */
--- /dev/null
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "common/logger_macros.hpp"
+
+#include "vdma/integrated/integrated_device.hpp"
+#include "vdma/vdma_config_manager.hpp"
+
+#include "md5.h"
+#include <memory>
+
+static const std::string INTEGRATED_NNC_DRIVER_PATH = "/dev/hailo_integrated_nnc";
+
+namespace hailort
+{
+
+// Returns true if the integrated-NNC driver is present on this machine,
+// detected by the existence of its device node (/dev/hailo_integrated_nnc).
+bool IntegratedDevice::is_loaded()
+{
+#if defined(_MSC_VER)
+    // windows is not supported for core driver
+    return false;
+#else
+    return (access(INTEGRATED_NNC_DRIVER_PATH.c_str(), F_OK) == 0);
+#endif // defined(_MSC_VER)
+}
+
+// Factory: opens the integrated-NNC driver and wraps it in an IntegratedDevice.
+// Returns HAILO_OUT_OF_HOST_MEMORY if allocation fails, or the status reported
+// by the constructor if device initialization fails.
+Expected<std::unique_ptr<IntegratedDevice>> IntegratedDevice::create()
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    auto driver = HailoRTDriver::create(INTEGRATED_NNC_DRIVER_PATH);
+    CHECK_EXPECTED(driver, "Failed to initialize HailoRTDriver");
+
+    // The constructor cannot return a status, so it reports through the
+    // `status` out-parameter, which is checked right after construction.
+    auto device = std::unique_ptr<IntegratedDevice>(new (std::nothrow) IntegratedDevice(driver.release(), status, DEVICE_ID));
+    CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating IntegratedDevice");
+
+    return device;
+}
+
+
+// Private constructor; success/failure is reported through the `status`
+// out-parameter (checked by create()).
+IntegratedDevice::IntegratedDevice(HailoRTDriver &&driver, hailo_status &status, const std::string &device_id) :
+    VdmaDevice::VdmaDevice(std::move(driver), Device::Type::INTEGRATED, device_id)
+{
+    status = update_fw_state();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("update_fw_state() failed with status {}", status);
+        return;
+    }
+
+    // Redundant (status is already HAILO_SUCCESS on this path); kept as an
+    // explicit success marker.
+    status = HAILO_SUCCESS;
+}
+
+// Returns the cached device architecture — m_device_architecture is presumably
+// filled by update_fw_state() during construction (TODO confirm), so this never fails.
+Expected<hailo_device_architecture_t> IntegratedDevice::get_architecture() const {
+    return Expected<hailo_device_architecture_t>(m_device_architecture);
+}
+
+// Only an NN-core reset can be performed through the driver; any other reset
+// type is rejected — a full reset of the integrated device requires a system reboot.
+hailo_status IntegratedDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
+{
+    if (CONTROL_PROTOCOL__RESET_TYPE__NN_CORE == reset_type) {
+        return m_driver.reset_nn_core();
+    }
+
+    LOGGER__ERROR("Can't reset IntegratedDevice, please use linux reboot");
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+// Reads the firmware log. On the integrated device only the core CPU's log is
+// available, so requests for CPU 0 are rejected; otherwise delegates to the
+// base-class implementation.
+Expected<size_t> IntegratedDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
+{
+    if (hailo_cpu_id_t::HAILO_CPU_ID_0 == cpu_id) {
+        LOGGER__ERROR("Read FW log is supported only on core CPU");
+        return make_unexpected(HAILO_INVALID_ARGUMENT);
+    }
+
+    return VdmaDevice::read_log(buffer, cpu_id);
+}
+
+} /* namespace hailort */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file integrated_device.hpp
+ * @brief Device used by Hailo-15
+ *
+ **/
+
+#ifndef _HAILO_INTEGRATED_DEVICE_HPP_
+#define _HAILO_INTEGRATED_DEVICE_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/hailort.h"
+
+#include "vdma/vdma_device.hpp"
+
+#include <memory>
+
+
+namespace hailort
+{
+
+/**
+ * Device implementation for the integrated (on-SoC, Hailo-15) NN core, driven
+ * through the integrated-NNC kernel driver. Construct via create(); the
+ * constructor is private and reports failures through a status out-parameter.
+ */
+class IntegratedDevice : public VdmaDevice {
+public:
+    virtual ~IntegratedDevice() = default;
+    // True if the integrated-NNC driver's device node exists on this machine.
+    static bool is_loaded();
+    static Expected<std::unique_ptr<IntegratedDevice>> create();
+
+    virtual Expected<hailo_device_architecture_t> get_architecture() const override;
+    virtual const char* get_dev_id() const override {return DEVICE_ID;}
+    // Reads the firmware log; only the core CPU log is supported (not CPU 0).
+    Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id);
+
+    // The integrated device streams only over its own (integrated) interface.
+    virtual bool is_stream_interface_supported(const hailo_stream_interface_t &stream_interface) const override
+    {
+        switch (stream_interface) {
+        case HAILO_STREAM_INTERFACE_INTEGRATED:
+            return true;
+        case HAILO_STREAM_INTERFACE_PCIE:
+        case HAILO_STREAM_INTERFACE_ETH:
+        case HAILO_STREAM_INTERFACE_MIPI:
+            return false;
+        default:
+            LOGGER__ERROR("Invalid stream interface");
+            return false;
+        }
+    }
+
+    // Fixed identifier — there is at most one integrated device per system.
+    static constexpr const char *DEVICE_ID = "[integrated]";
+
+protected:
+    virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
+
+private:
+    IntegratedDevice(HailoRTDriver &&driver, hailo_status &status, const std::string &device_id);
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_INTEGRATED_DEVICE_HPP_ */
\ No newline at end of file
+++ /dev/null
-#include "mapped_buffer.hpp"
-
-namespace hailort {
-namespace vdma {
-
-Expected<MappedBuffer> MappedBuffer::create(size_t required_size, HailoRTDriver::DmaDirection data_direction,
- HailoRTDriver &driver)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- MappedBuffer object(required_size, data_direction, driver, status);
- if (HAILO_SUCCESS != status) {
- return make_unexpected(status);
- }
-
- return object;
-}
-
-MappedBuffer::MappedBuffer(
- size_t required_size, HailoRTDriver::DmaDirection data_direction, HailoRTDriver &driver, hailo_status &status)
- : m_size(required_size), m_driver(driver)
-{
- auto buffer = VdmaMappedBufferImpl::allocate_vdma_buffer(driver, required_size);
- if (! buffer) {
- status = buffer.status();
- return;
- }
-
- auto expected_handle = m_driver.vdma_buffer_map(buffer->get(), required_size, data_direction,
- buffer->get_mapped_buffer_identifier());
- if (!expected_handle) {
- status = expected_handle.status();
- return;
- }
-
- m_vdma_mapped_buffer = make_unique_nothrow<VdmaMappedBufferImpl>(buffer.release());
- if (nullptr == m_vdma_mapped_buffer) {
- m_driver.vdma_buffer_unmap(expected_handle.value());
- status = HAILO_OUT_OF_HOST_MEMORY;
- return;
- }
-
- m_handle = expected_handle.release();
- status = HAILO_SUCCESS;
-}
-
-MappedBuffer::~MappedBuffer()
-{
- if (m_vdma_mapped_buffer && *m_vdma_mapped_buffer) {
- m_driver.vdma_buffer_unmap(m_handle);
- }
-}
-
-hailo_status MappedBuffer::write(const void *buf_src, size_t count, size_t offset)
-{
- if ((count + offset) > m_size) {
- LOGGER__ERROR("Requested size {} from offset {} is more than the MappedBuffer size {}", count, offset, m_size);
- return HAILO_INSUFFICIENT_BUFFER;
- }
-
- if (count > 0) {
- auto dst_vdma_address = (uint8_t*)m_vdma_mapped_buffer->get() + offset;
- memcpy(dst_vdma_address, buf_src, count);
-
- auto status = m_driver.vdma_buffer_sync(m_handle, HailoRTDriver::DmaDirection::H2D, dst_vdma_address, count);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed synching vdma buffer on write");
- return status;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status MappedBuffer::read(void *buf_dst, size_t count, size_t offset, bool should_sync)
-{
- if ((count + offset) > m_size) {
- LOGGER__ERROR("Requested size {} from offset {} is more than the MappedBuffer size {}", count, offset, m_size);
- return HAILO_INSUFFICIENT_BUFFER;
- }
-
- if (count > 0) {
- const auto dst_vdma_address = (uint8_t*)m_vdma_mapped_buffer->get() + offset;
- if (should_sync) {
- const auto status = m_driver.vdma_buffer_sync(m_handle, HailoRTDriver::DmaDirection::D2H, dst_vdma_address, count);
- CHECK_SUCCESS(status, "Failed synching vdma buffer on read");
- }
-
- memcpy(buf_dst, dst_vdma_address, count);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status MappedBuffer::write_cyclic(const void *buf_src, size_t count, size_t offset)
-{
- if (count > m_size) {
- LOGGER__ERROR("Requested size({}) is more than the MappedBuffer size {}", count, m_size);
- return HAILO_INSUFFICIENT_BUFFER;
- }
-
- auto size_to_end = m_size - offset;
- auto copy_size = std::min(size_to_end, count);
- auto status = write(buf_src, copy_size, offset);
- if (HAILO_SUCCESS != status) {
- return status;
- }
-
- auto remaining_size = count - copy_size;
- if (remaining_size > 0) {
- status = write((uint8_t*)buf_src + copy_size, remaining_size, 0);
- if (HAILO_SUCCESS != status) {
- return status;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status MappedBuffer::read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync)
-{
- if (count > m_size) {
- LOGGER__ERROR("Requested size({}) is more than the MappedBuffer size {}", count, m_size);
- return HAILO_INSUFFICIENT_BUFFER;
- }
-
- auto size_to_end = m_size - offset;
- auto copy_size = std::min(size_to_end, count);
- auto status = read(buf_dst, copy_size, offset, should_sync);
- if (HAILO_SUCCESS != status) {
- return status;
- }
-
- auto remaining_size = count - copy_size;
- if (remaining_size > 0) {
- status = read((uint8_t*)buf_dst + copy_size, remaining_size, 0, should_sync);
- if (HAILO_SUCCESS != status) {
- return status;
- }
- }
-
- return HAILO_SUCCESS;
-}
-
-} /* namespace vdma */
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file mapped_buffer.hpp
- * @brief The mapped buffer that is continuous in virtual memory, but not on physical memory.
- * We map the buffer to the IOMMU.
- *
- * The buffer can be used only with the help of a descriptors list that contains pointers to a physical
- * continuous "dma pages".
- *
- * There are 2 options to allocated the buffer:
- * 1. User mode allocation - the user mode calls `malloc` or `mmap` to allocate the buffer, then
- * using HailoRTDriver we map the driver to the IOMMU (and pin the pages to avoid pagigs).
- * This is the default option
- * 2. Kernel mode allocation - on some systems, the user mode doesn't allocate the memory in a "dma-able" address,
- * so we need to allocate the pages in driver.
- **/
-
-#ifndef _HAILO_VDMA_MAPPED_BUFFER_HPP_
-#define _HAILO_VDMA_MAPPED_BUFFER_HPP_
-
-#include "os/mmap_buffer.hpp"
-#include "os/hailort_driver.hpp"
-#include "hailo/expected.hpp"
-#include "vdma_mapped_buffer_impl.hpp"
-
-namespace hailort {
-namespace vdma {
-
-class MappedBuffer final
-{
-public:
- static Expected<MappedBuffer> create(size_t required_size, HailoRTDriver::DmaDirection data_direction,
- HailoRTDriver &driver);
-
- MappedBuffer(size_t required_size, HailoRTDriver::DmaDirection data_direction,
- HailoRTDriver &driver, hailo_status &status);
- ~MappedBuffer();
-
- MappedBuffer(const MappedBuffer &other) = delete;
- MappedBuffer &operator=(const MappedBuffer &other) = delete;
- MappedBuffer(MappedBuffer &&other) noexcept = default;
- MappedBuffer &operator=(MappedBuffer &&other) = delete;
-
- void *user_address() { return m_vdma_mapped_buffer->get(); }
- HailoRTDriver::VdmaBufferHandle handle() { return m_handle; }
- size_t size() const { return m_size; }
-
- /**
- * Copy data from buf_src parameter to this MappedBuffer.
- *
- * @note (offset + count) MUST be smaller than this MappedBuffer size
- *
- * @param[in] buf_src The buffer to copy the data from
- * @param[in] count Number of bytes to copy from buf_src
- * @param[in] offset The offset relative to this MappedBuffer to copy the data to
- */
- hailo_status write(const void *buf_src, size_t count, size_t offset);
-
- /**
- * Copy data from this MappedBuffer to buf_dst.
- *
- * @note (offset + count) MUST be smaller than this MappedBuffer size
- *
- * @param[out] buf_dst The buffer to copy the data to
- * @param[in] count Number of bytes to copy to buf_dst
- * @param[in] offset The offset relative to this MappedBuffer to copy the data from
- * @param[in] should_sync If the backing memory is vdma and it's written to by a device, sync should be true
- * so that the read will be consistent with the backing memory
- */
- hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync = true);
-
- /**
- * Copy data from buf_src parameter to this MappedBuffer.
- *
- * Similar to 'write' but if (offset + count) is larger than the MappedBuffer size, the copy continues
- * from the start of the MappedBuffer.
- *
- * @note count MUST be smaller than this MappedBuffer size
- *
- * @param[in] buf_src The buffer to copy the data from
- * @param[in] count Number of bytes to copy from buf_src
- * @param[in] offset The offset relative to this MappedBuffer to copy the data to
- */
- hailo_status write_cyclic(const void *buf_src, size_t count, size_t offset);
-
- /**
- * Copy data from this MappedBuffer to buf_dst.
- *
- * Similar to 'read' but if (offset + count) is larger than the MappedBuffer size, the copy continues
- * from the start of the MappedBuffer.
- *
- * @note count MUST be smaller than this MappedBuffer size
- *
- * @param[out] buf_dst The buffer to copy the data to
- * @param[in] count Number of bytes to copy to buf_dst
- * @param[in] offset The offset relative to this MappedBuffer to copy the data from
- * @param[in] should_sync If the backing memory is vdma and it's written to by a device, sync should be true
- * so that the read will be consistent with the backing memory
- */
- hailo_status read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync = true);
-
-private:
-
- std::unique_ptr<VdmaMappedBufferImpl> m_vdma_mapped_buffer;
- HailoRTDriver::VdmaBufferHandle m_handle;
- size_t m_size;
- HailoRTDriver &m_driver;
-};
-
-} /* namespace vdma */
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_MAPPED_BUFFER_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file continuous_buffer.cpp
+ * @brief Continuous physical vdma buffer.
+ **/
+
+#include "continuous_buffer.hpp"
+
+namespace hailort {
+namespace vdma {
+
+// Minimum size of ccb buffers in descriptors, taken from the CCB spec.
+#define MIN_CCB_DESCRIPTORS_COUNT (16)
+
+// Rounds `size` up to the next multiple of `align` (`align` must be a power of 2).
+static uint32_t align(uint32_t size, uint32_t align)
+{
+    assert(is_powerof2(align));
+    const uint32_t mask = align - 1;
+    return (size + mask) & ~mask;
+}
+
+// Allocates a physically-continuous DMA buffer through the driver and maps it
+// into user space. If the mmap fails, the driver allocation is released before
+// the error is returned, so no resources leak.
+Expected<ContinuousBuffer> ContinuousBuffer::create(size_t size, HailoRTDriver &driver)
+{
+    auto result = driver.vdma_continuous_buffer_alloc(size);
+    CHECK_EXPECTED(result, "Failed allocating continuous buffer, size {}", size);
+
+    uintptr_t handle = 0;
+    uint64_t dma_address = 0;
+    std::tie(handle, dma_address) = result.release();
+
+    // The driver handle doubles as the mmap offset key for the file mapping.
+    auto mmap = MmapBuffer<void>::create_file_map(size, driver.fd(), handle);
+    if (!mmap) {
+        LOGGER__ERROR("Failed mmap continuous buffer");
+        driver.vdma_continuous_buffer_free(handle);
+        return make_unexpected(mmap.status());
+    }
+
+    return ContinuousBuffer(size, driver, handle, dma_address, mmap.release());
+}
+
+// Pads `buffer_size` up to a whole number of descriptor pages, and to at least
+// MIN_CCB_DESCRIPTORS_COUNT pages (the CCB-spec minimum).
+uint32_t ContinuousBuffer::get_buffer_size(uint32_t buffer_size)
+{
+    const uint16_t page_size = DEFAULT_DESC_PAGE_SIZE;
+    const auto aligned_buffer_size = align(buffer_size, page_size);
+
+    const uint32_t min_buffer_size = page_size * MIN_CCB_DESCRIPTORS_COUNT;
+    return std::max(aligned_buffer_size, min_buffer_size);
+}
+
+// Like get_buffer_size(), but the resulting descriptor count is additionally
+// rounded up to a power of 2 (with MIN_CCB_DESCRIPTORS_COUNT as the floor).
+uint32_t ContinuousBuffer::get_buffer_size_desc_power2(uint32_t buffer_size)
+{
+    const uint16_t page_size = DEFAULT_DESC_PAGE_SIZE;
+    const auto descriptors_in_buffer = DIV_ROUND_UP(buffer_size, page_size);
+    const auto actual_descriptors_count = get_nearest_powerof_2(descriptors_in_buffer, MIN_CCB_DESCRIPTORS_COUNT);
+    return actual_descriptors_count * page_size;
+}
+
+// Unmaps the user-space mapping and frees the driver allocation. m_handle == 0
+// marks a moved-from (or already-released) object, so the cleanup is skipped.
+// Failures are only logged — a destructor has no way to propagate them.
+ContinuousBuffer::~ContinuousBuffer()
+{
+    if (0 != m_handle) {
+        auto status = m_mmap.unmap();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed unmap mmap buffer {}", status);
+        }
+
+        status = m_driver.vdma_continuous_buffer_free(m_handle);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed free continuous buffer, {}", status);
+        }
+
+        m_handle = 0;
+    }
+}
+
+// Total buffer size in bytes, as passed to create().
+size_t ContinuousBuffer::size() const
+{
+    return m_size;
+}
+
+// DMA (bus) address of the buffer, as returned by the driver allocation.
+uint64_t ContinuousBuffer::dma_address() const
+{
+    return m_dma_address;
+}
+
+uint16_t ContinuousBuffer::desc_page_size() const
+{
+    // Currently we support only the default desc page size, TODO: HRT-5381 support more desc page size?
+    return DEFAULT_DESC_PAGE_SIZE;
+}
+
+// Number of descriptor pages covering the whole buffer.
+uint32_t ContinuousBuffer::descs_count() const
+{
+    return descriptors_in_buffer(m_size);
+}
+
+// Copies `count` bytes starting at `offset` out of the buffer into `buf_dst`.
+// Fails with HAILO_INSUFFICIENT_BUFFER if the range exceeds the buffer.
+hailo_status ContinuousBuffer::read(void *buf_dst, size_t count, size_t offset, bool /* should_sync */)
+{
+    CHECK((count + offset) <= m_size, HAILO_INSUFFICIENT_BUFFER,
+        "Requested size {} from offset {} is more than the buffer size {}", count, offset, m_size);
+    // We use dma coherent mmap, so no need to sync the buffer after the memcpy.
+    const auto src_address = reinterpret_cast<uint8_t*>(m_mmap.address()) + offset;
+    memcpy(buf_dst, src_address, count);
+    return HAILO_SUCCESS;
+}
+
+// Copies `count` bytes from `buf_src` into the buffer starting at `offset`.
+// Fails with HAILO_INSUFFICIENT_BUFFER if the range exceeds the buffer.
+hailo_status ContinuousBuffer::write(const void *buf_src, size_t count, size_t offset)
+{
+    CHECK((count + offset) <= m_size, HAILO_INSUFFICIENT_BUFFER,
+        "Requested size {} from offset {} is more than the buffer size {}", count, offset, m_size);
+    // We use dma coherent mmap, so no need to sync the buffer after the memcpy.
+    const auto dst_address = reinterpret_cast<uint8_t*>(m_mmap.address()) + offset;
+    memcpy(dst_address, buf_src, count);
+    return HAILO_SUCCESS;
+}
+
+// No-op override: in continuous (CCB) mode the hardware programs its own
+// descriptors, so only the would-be descriptor count is returned.
+Expected<uint32_t> ContinuousBuffer::program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
+    size_t desc_offset, bool is_circular)
+{
+    (void)last_desc_interrupts_domain;
+    (void)desc_offset;
+    (void)is_circular;
+
+    // The descriptors in continuous mode are programmed by the hw, nothing to do here.
+    return descriptors_in_buffer(transfer_size);
+}
+
+// No-op override: interrupt reprogramming is not needed in continuous mode
+// (see program_descriptors); always succeeds.
+hailo_status ContinuousBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
+    InterruptsDomain new_interrupts_domain)
+{
+    (void)transfer_size;
+    (void)batch_size;
+    (void)new_interrupts_domain;
+
+    // The descriptors in continuous mode are programmed by the hw, nothing to do here.
+    return HAILO_SUCCESS;
+}
+
+// Private constructor — only reachable via create(), which validates and
+// allocates all the resources stored here.
+ContinuousBuffer::ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
+    MmapBuffer<void> &&mmap) :
+    m_size(size),
+    m_driver(driver),
+    m_handle(handle),
+    m_dma_address(dma_address),
+    m_mmap(std::move(mmap))
+{}
+
+}; /* namespace vdma */
+}; /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file continuous_buffer.hpp
+ * @brief Continuous physical vdma buffer.
+ **/
+
+#ifndef _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_
+#define _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_
+
+#include "os/hailort_driver.hpp"
+#include "os/mmap_buffer.hpp"
+#include "vdma/memory/vdma_buffer.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+/**
+ * A VdmaBuffer backed by physically-continuous memory allocated by the driver
+ * (continuous/CCB mode). Because the memory is continuous and the mapping is
+ * DMA-coherent, descriptor programming and sync are no-ops for this type.
+ * Movable but not copyable; the moved-from object releases nothing.
+ */
+class ContinuousBuffer final : public VdmaBuffer {
+public:
+    static Expected<ContinuousBuffer> create(size_t size, HailoRTDriver &driver);
+
+    // Pad a requested size to whole descriptor pages and the CCB minimum.
+    static uint32_t get_buffer_size(uint32_t buffer_size);
+    // Get buffer size with the requirement that the amount of descriptors is a power of 2.
+    static uint32_t get_buffer_size_desc_power2(uint32_t buffer_size);
+
+    ContinuousBuffer(const ContinuousBuffer &) = delete;
+    ContinuousBuffer& operator=(const ContinuousBuffer &) = delete;
+    ContinuousBuffer& operator=(ContinuousBuffer &&) = delete;
+
+    virtual ~ContinuousBuffer();
+
+    // Move ctor zeroes the source's handle/address so its destructor is a no-op.
+    ContinuousBuffer(ContinuousBuffer &&other) noexcept :
+        VdmaBuffer(std::move(other)),
+        m_size(other.m_size),
+        m_driver(other.m_driver),
+        m_handle(std::exchange(other.m_handle, 0)),
+        m_dma_address(std::exchange(other.m_dma_address, 0)),
+        m_mmap(std::move(other.m_mmap))
+    {}
+
+    virtual Type type() const override
+    {
+        return Type::CONTINUOUS;
+    }
+
+    virtual size_t size() const override;
+    virtual uint64_t dma_address() const override;
+    virtual uint16_t desc_page_size() const override;
+    virtual uint32_t descs_count() const override;
+
+    virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync) override;
+    virtual hailo_status write(const void *buf_src, size_t count, size_t offset) override;
+
+    // Both are no-ops in continuous mode — the hw programs its own descriptors.
+    virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
+        size_t desc_offset, bool is_circular) override;
+    virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
+        InterruptsDomain new_interrupts_domain) override;
+
+private:
+    ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
+        MmapBuffer<void> &&mmap);
+
+    const size_t m_size;
+    HailoRTDriver &m_driver;
+    uintptr_t m_handle;      // driver allocation handle; 0 == released/moved-from
+    uint64_t m_dma_address;  // bus address of the continuous allocation
+    MmapBuffer<void> m_mmap; // user-space mapping of the same memory
+};
+
+}; /* namespace vdma */
+}; /* namespace hailort */
+
+#endif /* _HAILO_VDMA_CONTINUOUS_BUFFER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file descriptor_list.cpp
+ * @brief Implements vdma descriptor list class
+ **/
+
+#include "vdma/memory/descriptor_list.hpp"
+#include "vdma/memory/mapped_buffer_impl.hpp"
+
+#include "utils.h"
+
+
+#define DESC_STATUS_REQ (1 << 0)
+#define DESC_STATUS_REQ_ERR (1 << 1)
+#define DESC_REQUREST_IRQ_PROCESSED (1 << 2)
+#define DESC_REQUREST_IRQ_ERR (1 << 3)
+
+#define PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
+#define PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
+
+#define DRAM_DMA_HOST_INTERRUPTS_BITMASK (1 << 4)
+#define DRAM_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 5)
+
+#define DESC_PAGE_SIZE_SHIFT (8)
+#define DESC_PAGE_SIZE_MASK (0xFFFFFF00)
+#define DESC_IRQ_MASK (0x0000003C)
+
+namespace hailort {
+namespace vdma {
+
+
+// Factory: builds a descriptor list of `desc_count` entries, letting the driver
+// clamp/adjust the requested descriptor page size first.
+Expected<DescriptorList> DescriptorList::create(uint32_t desc_count, uint16_t requested_desc_page_size,
+    HailoRTDriver &driver)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto desc_page_size_value = driver.calc_desc_page_size(requested_desc_page_size);
+    DescriptorList object(desc_count, driver, desc_page_size_value, status);
+    if (HAILO_SUCCESS != status) {
+        return make_unexpected(status);
+    }
+
+    // No need to initialize descriptors here because they are initialized in driver in hailo_vdma_program_descriptor()
+
+    return object;
+}
+
+// Constructor; reports success/failure through the `status` out-parameter
+// (checked by create()). Steps: validate desc_count is a power of 2, compute
+// the list depth (log2 of the count), allocate the list in the driver, then
+// map it into user space. On any failure it returns early with m_desc_handle
+// left at 0/its last value, and the destructor cleans up what was acquired.
+DescriptorList::DescriptorList(uint32_t desc_count, HailoRTDriver &driver, uint16_t desc_page_size,
+    hailo_status &status) :
+    m_mapped_list(),
+    m_count(desc_count),
+    m_depth(0),
+    m_desc_handle(0),
+    m_dma_address(0),
+    m_driver(driver),
+    m_desc_page_size(desc_page_size)
+{
+    if (!is_powerof2(desc_count)) {
+        LOGGER__ERROR("Descriptor count ({}) must be power of 2", desc_count);
+        status = HAILO_INVALID_ARGUMENT;
+        return;
+    }
+
+    auto depth = calculate_desc_list_depth(desc_count);
+    if (!depth) {
+        status = depth.status();
+        return;
+    }
+    m_depth = depth.value();
+
+    auto desc_handle_phys_addr_pair = m_driver.descriptors_list_create(desc_count);
+    if (!desc_handle_phys_addr_pair) {
+        status = desc_handle_phys_addr_pair.status();
+        return;
+    }
+
+    m_desc_handle = desc_handle_phys_addr_pair->first;
+    m_dma_address = desc_handle_phys_addr_pair->second;
+
+    // Map the driver-allocated descriptor array into user space; the handle is
+    // used as the mmap offset key.
+    auto mapped_list = MmapBuffer<VdmaDescriptor>::create_file_map(desc_count * sizeof(VdmaDescriptor), m_driver.fd(), m_desc_handle);
+    if (!mapped_list) {
+        LOGGER__ERROR("Failed to memory map descriptors. desc handle: {:X}", m_desc_handle);
+        status = mapped_list.status();
+        return;
+    }
+
+    m_mapped_list = mapped_list.release();
+    status = HAILO_SUCCESS;
+}
+
+// Unmaps the descriptor array and releases the driver-side list. m_desc_handle
+// of 0 marks a moved-from object. Failures are only logged.
+DescriptorList::~DescriptorList()
+{
+    if (HAILO_SUCCESS != m_mapped_list.unmap()) {
+        LOGGER__ERROR("Failed to release descriptors mapping");
+    }
+
+    // Note: The descriptors_list is freed by the desc_handle (no need to use the phys_address to free)
+    if (0 != m_desc_handle) {
+        if(HAILO_SUCCESS != m_driver.descriptors_list_release(m_desc_handle)) {
+            LOGGER__ERROR("Failed to release descriptor list {}", m_desc_handle);
+        }
+    }
+}
+
+DescriptorList::DescriptorList(DescriptorList &&other) noexcept :
+ m_mapped_list(std::move(other.m_mapped_list)),
+ m_count(std::move(other.m_count)),
+ m_depth(std::move(other.m_depth)),
+ m_desc_handle(std::exchange(other.m_desc_handle, 0)),
+ m_dma_address(std::exchange(other.m_dma_address, 0)),
+ m_driver(other.m_driver),
+ m_desc_page_size(other.m_desc_page_size) {}
+
+Expected<uint8_t> DescriptorList::calculate_desc_list_depth(size_t count)
+{
+ // Calculate log2 of m_count (by finding the offset of the MSB)
+ uint32_t depth = 0;
+ while (count >>= 1) {
+ ++depth;
+ }
+ CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(depth), HAILO_INTERNAL_FAILURE, "Calculated desc_list_depth is too big: {}", depth);
+ return static_cast<uint8_t>(depth);
+}
+
// Binds `buffer` to this descriptor list (via the driver), starting at `starting_desc`.
// The buffer must fit in the list's total capacity (desc_page_size * count).
hailo_status DescriptorList::configure_to_use_buffer(DmaMappedBuffer& buffer, ChannelId channel_id, uint32_t starting_desc)
{
    const auto desc_list_capacity = m_desc_page_size * m_count;
    CHECK(buffer.size() <= desc_list_capacity, HAILO_INVALID_ARGUMENT,
        "Can't bind a buffer larger than the descriptor list's capacity. Buffer size {}, descriptor list capacity {}",
        buffer.size(), desc_list_capacity);

    return m_driver.descriptors_list_bind_vdma_buffer(m_desc_handle, buffer.pimpl->handle(), m_desc_page_size,
        channel_id.channel_index, starting_desc);
}
+
+Expected<uint16_t> DescriptorList::program_last_descriptor(size_t transfer_size,
+ InterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular)
+{
+ assert(transfer_size > 0);
+ const auto required_descriptors = descriptors_in_buffer(transfer_size);
+ // Required_descriptors + desc_offset can't reach m_count.
+ if ((!is_circular) && ((required_descriptors + desc_offset) > m_count)){
+ LOGGER__ERROR("Requested transfer size ({}) result in more descriptors than available ({})", transfer_size, m_count);
+ return make_unexpected(HAILO_OUT_OF_DESCRIPTORS);
+ }
+
+ // Program last descriptor of the transfer size
+ /* write residue page with the remaining buffer size*/
+ auto resuide = transfer_size - (required_descriptors - 1) * m_desc_page_size;
+ assert(IS_FIT_IN_UINT16(resuide));
+ size_t last_desc = (desc_offset + required_descriptors - 1) & (m_count - 1);
+ program_single_descriptor((*this)[last_desc], static_cast<uint16_t>(resuide), last_desc_interrupts_domain);
+
+ return std::move(static_cast<uint16_t>(required_descriptors));
+}
+
+hailo_status DescriptorList::reprogram_descriptor_interrupts_domain(size_t desc_index,
+ InterruptsDomain interrupts_domain)
+{
+ if (desc_index >= m_count){
+ LOGGER__ERROR("Requested desc (index={}) exceeds the number of descriptors in the list ({})", desc_index, m_count);
+ return HAILO_OUT_OF_DESCRIPTORS;
+ }
+ reprogram_single_descriptor_interrupts_domain((*this)[desc_index], interrupts_domain);
+ return HAILO_SUCCESS;
+}
+
// Number of descriptors needed to cover `buffer_size` bytes with this list's page size.
uint32_t DescriptorList::descriptors_in_buffer(size_t buffer_size) const
{
    return descriptors_in_buffer(buffer_size, m_desc_page_size);
}
+
+uint32_t DescriptorList::descriptors_in_buffer(size_t buffer_size, uint16_t desc_page_size)
+{
+ assert(buffer_size < std::numeric_limits<uint32_t>::max());
+ return static_cast<uint32_t>(DIV_ROUND_UP(buffer_size, desc_page_size));
+}
+
+uint32_t DescriptorList::calculate_descriptors_count(uint32_t buffer_size, uint16_t batch_size, uint16_t desc_page_size)
+{
+ // Because we use cyclic buffer, the amount of active descs is lower by one that the amount
+ // of descs given (Otherwise we won't be able to determine if the buffer is empty or full).
+ // Therefore we add 1 in order to compensate.
+ uint32_t descs_count = std::min(((descriptors_in_buffer(buffer_size, desc_page_size) * batch_size) + 1),
+ MAX_DESCS_COUNT);
+
+ return get_nearest_powerof_2(descs_count, MIN_DESCS_COUNT);
+}
+
// Chooses a (desc_page_size, desc_count) pair for a single transfer of `transfer_size`
// bytes, starting the search from a transfer-size-aware initial page size.
Expected<std::pair<uint16_t, uint32_t>> DescriptorList::get_desc_buffer_sizes_for_single_transfer(
    const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size)
{
    // Note: If the pages pointed to by the descriptors are copied in their entirety, then DEFAULT_DESC_PAGE_SIZE
    // is the optimal value. For transfer_sizes smaller than DEFAULT_DESC_PAGE_SIZE using smaller descriptor page
    // sizes will save memory consumption without harming performance. In the case of nms for example, only one bbox
    // is copied from each page. Hence, we'll use MIN_DESC_PAGE_SIZE for nms.
    const uint32_t initial_desc_page_size = (DEFAULT_DESC_PAGE_SIZE > transfer_size) ?
        get_nearest_powerof_2(transfer_size, MIN_DESC_PAGE_SIZE) : DEFAULT_DESC_PAGE_SIZE;
    if (DEFAULT_DESC_PAGE_SIZE != initial_desc_page_size) {
        LOGGER__INFO("Using non-default initial_desc_page_size of {}, due to a small transfer size ({})",
            initial_desc_page_size, transfer_size);
    }
    // NOTE(review): "16B" in this message presumably means 16 bits (the check is IS_FIT_IN_UINT16) - confirm/reword.
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(initial_desc_page_size), HAILO_INTERNAL_FAILURE,
        "Descriptor page size needs to fit in 16B");

    return get_desc_buffer_sizes_for_single_transfer_impl(driver, min_batch_size, max_batch_size, transfer_size,
        static_cast<uint16_t>(initial_desc_page_size));
}
+
// Chooses a (desc_page_size, desc_count) pair that covers all `transfer_sizes`, batched
// `batch_size` times, starting the page-size search from DEFAULT_DESC_PAGE_SIZE.
Expected<std::pair<uint16_t, uint32_t>> DescriptorList::get_desc_buffer_sizes_for_multiple_transfers(
    const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes)
{
    return get_desc_buffer_sizes_for_multiple_transfers_impl(driver, batch_size, transfer_sizes,
        DEFAULT_DESC_PAGE_SIZE);
}
+
+Expected<std::pair<uint16_t, uint32_t>> DescriptorList::get_desc_buffer_sizes_for_single_transfer_impl(
+ const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size,
+ uint16_t initial_desc_page_size)
+{
+ auto results = DescriptorList::get_desc_buffer_sizes_for_multiple_transfers_impl(driver, min_batch_size,
+ {transfer_size}, initial_desc_page_size);
+ CHECK_EXPECTED(results);
+
+ auto page_size = results->first;
+
+ auto desc_count = std::min(MAX_DESCS_COUNT,
+ DescriptorList::calculate_descriptors_count(transfer_size, max_batch_size, page_size));
+
+ return std::make_pair(page_size, desc_count);
+}
+
// Core page-size/descriptor-count search: starting from `initial_desc_page_size`, doubles
// the page size until the total descriptor demand (times batch_size) fits within the
// driver's usable limit of (MAX_DESCS_COUNT - 1) descriptors.
Expected<std::pair<uint16_t, uint32_t>> DescriptorList::get_desc_buffer_sizes_for_multiple_transfers_impl(
    const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes,
    uint16_t initial_desc_page_size)
{
    const uint16_t min_desc_page_size = driver.calc_desc_page_size(MIN_DESC_PAGE_SIZE);
    const uint16_t max_desc_page_size = driver.calc_desc_page_size(MAX_DESC_PAGE_SIZE);
    // Defined as uint32_t to prevent overflow (as we multiply it by two in each iteration of the while loop below)
    uint32_t local_desc_page_size = driver.calc_desc_page_size(initial_desc_page_size);
    // NOTE(review): "16B" in the messages below presumably means 16 bits (the check is IS_FIT_IN_UINT16).
    CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(local_desc_page_size), HAILO_INTERNAL_FAILURE,
        "Descriptor page size needs to fit in 16B");
    CHECK_AS_EXPECTED(local_desc_page_size <= max_desc_page_size, HAILO_INTERNAL_FAILURE,
        "Initial descriptor page size ({}) is larger than maximum descriptor page size ({})",
        local_desc_page_size, max_desc_page_size);
    CHECK_AS_EXPECTED(local_desc_page_size >= min_desc_page_size, HAILO_INTERNAL_FAILURE,
        "Initial descriptor page size ({}) is smaller than minimum descriptor page size ({})",
        local_desc_page_size, min_desc_page_size);

    uint32_t acc_desc_count = get_descriptors_count_needed(transfer_sizes, static_cast<uint16_t>(local_desc_page_size));

    // Too many descriptors; try a larger desc_page_size which will lead to less descriptors used
    while ((acc_desc_count * batch_size) > (MAX_DESCS_COUNT - 1)) {
        local_desc_page_size <<= 1;

        CHECK_AS_EXPECTED(local_desc_page_size <= max_desc_page_size, HAILO_OUT_OF_DESCRIPTORS,
            "Network shapes and batch size exceeds driver descriptors capabilities. "
            "Required descriptors count: {}, max allowed on the driver: {}. "
            "(A common cause for this error could be the batch size - which is {}).",
            (batch_size * acc_desc_count), (MAX_DESCS_COUNT - 1), batch_size);

        CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(local_desc_page_size), HAILO_INTERNAL_FAILURE,
            "Descriptor page size needs to fit in 16B");

        acc_desc_count = get_descriptors_count_needed(transfer_sizes, static_cast<uint16_t>(local_desc_page_size));
    }

    // Found desc_page_size and acc_desc_count
    const auto desc_page_size = static_cast<uint16_t>(local_desc_page_size);

    // Find descs_count (rounded up to a power of 2, as the descriptor list requires)
    const auto descs_count = get_nearest_powerof_2(acc_desc_count, MIN_DESCS_COUNT);
    CHECK_AS_EXPECTED(descs_count <= MAX_DESCS_COUNT, HAILO_OUT_OF_DESCRIPTORS);

    if (initial_desc_page_size != desc_page_size) {
        LOGGER__WARNING("Desc page size value ({}) is not optimal for performance.", desc_page_size);
    }

    return std::make_pair(desc_page_size, descs_count);
}
+
+uint32_t DescriptorList::get_descriptors_count_needed(const std::vector<uint32_t> &transfer_sizes,
+ uint16_t desc_page_size)
+{
+ uint32_t desc_count = 0;
+ for (auto &transfer_size : transfer_sizes) {
+ desc_count += descriptors_in_buffer(transfer_size, desc_page_size);
+ }
+
+ // One extra descriptor is needed, because the amount of available descriptors is (desc_count - 1)
+ desc_count += 1;
+ return desc_count;
+}
+
// Translates an InterruptsDomain into the raw control-word bitmask. The host/device bit
// positions differ between the PCIe and DRAM DMA engines, so the driver's dma_type decides.
uint32_t DescriptorList::get_interrupts_bitmask(InterruptsDomain interrupts_domain)
{
    uint32_t host_bitmask = 0;
    uint32_t device_bitmask = 0;

    switch (m_driver.dma_type()) {
    case HailoRTDriver::DmaType::PCIE:
        host_bitmask = PCIE_DMA_HOST_INTERRUPTS_BITMASK;
        device_bitmask = PCIE_DMA_DEVICE_INTERRUPTS_BITMASK;
        break;
    case HailoRTDriver::DmaType::DRAM:
        host_bitmask = DRAM_DMA_HOST_INTERRUPTS_BITMASK;
        device_bitmask = DRAM_DMA_DEVICE_INTERRUPTS_BITMASK;
        break;
    default:
        // Unknown dma type - programmer error, both masks stay 0 in release builds.
        assert(false);
    }

    uint32_t bitmask = 0;
    if (host_interuptes_enabled(interrupts_domain)) {
        bitmask |= host_bitmask;
    }
    if (device_interuptes_enabled(interrupts_domain)) {
        bitmask |= device_bitmask;
    }

    return bitmask;
}
+
// Writes page size + control bits into one descriptor's control word. Interrupt-request
// bits are added only when an interrupts domain is requested.
void DescriptorList::program_single_descriptor(VdmaDescriptor &descriptor, uint16_t page_size,
    InterruptsDomain interrupts_domain)
{
    // Update the descriptor's PAGE_SIZE field in the control register with the maximum size of the DMA page.
    // Make all edits to the local variable local_pagesize_desc_ctrl that is on the stack to save read/writes to DDR
    auto local_pagesize_desc_ctrl = static_cast<uint32_t>(page_size << DESC_PAGE_SIZE_SHIFT) & DESC_PAGE_SIZE_MASK;

    if (InterruptsDomain::NONE != interrupts_domain) {
        // Request an IRQ on completion and on error, for the selected domain(s)
        local_pagesize_desc_ctrl |= (DESC_REQUREST_IRQ_PROCESSED | DESC_REQUREST_IRQ_ERR |
            get_interrupts_bitmask(interrupts_domain));
#ifndef NDEBUG
        // Debug builds also request per-descriptor status reporting
        local_pagesize_desc_ctrl |= (DESC_STATUS_REQ | DESC_STATUS_REQ_ERR);
#endif
    }

    // Single store into the descriptor memory
    descriptor.PageSize_DescControl = local_pagesize_desc_ctrl;

#ifndef NDEBUG
    // Clear status so stale values aren't mistaken for fresh reports
    descriptor.RemainingPageSize_Status = 0;
#endif
}
+
// Rewrites only the IRQ-related bits of an already-programmed descriptor, preserving the
// page size and remaining control bits.
void DescriptorList::reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor,
    InterruptsDomain interrupts_domain)
{
    // Set the IRQ control bits to zero
    // Make all edits to the local variable local_pagesize_desc_ctrl that is on the stack to save read/writes to DDR
    auto local_pagesize_desc_ctrl = (descriptor.PageSize_DescControl & ~DESC_IRQ_MASK);

    if (InterruptsDomain::NONE == interrupts_domain) {
        // Nothing else to do - store the word with IRQ bits cleared
        descriptor.PageSize_DescControl = local_pagesize_desc_ctrl;
        return;
    }

    local_pagesize_desc_ctrl |= (DESC_REQUREST_IRQ_PROCESSED | DESC_REQUREST_IRQ_ERR |
        get_interrupts_bitmask(interrupts_domain));

    descriptor.PageSize_DescControl = local_pagesize_desc_ctrl;
}
+
// Restores a descriptor to its default state: full page size, no interrupts.
void DescriptorList::clear_descriptor(const size_t desc_index)
{
    // Clear previous descriptor properties
    program_single_descriptor((*this)[desc_index], m_desc_page_size, InterruptsDomain::NONE);
}
+
+} /* namespace vdma */
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file descriptor_list.hpp
+ * @brief Allocates a list of buffer descriptors used for VDMA
+ **/
+
+#ifndef _HAILO_VDMA_DESCRIPTOR_LIST_HPP_
+#define _HAILO_VDMA_DESCRIPTOR_LIST_HPP_
+
+#include "hailo/expected.hpp"
+#include "hailo/dma_mapped_buffer.hpp"
+
+#include "common/utils.hpp"
+
+#include "vdma/channel/channel_id.hpp"
+#include "os/hailort_driver.hpp"
+#include "os/mmap_buffer.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+
// Descriptor-count limits. Counts must be powers of 2 (enforced by DescriptorList's constructor).
#define MAX_DESCS_COUNT (64 * 1024u)
#define MIN_DESCS_COUNT (2u)
#define DEFAULT_DESC_COUNT (64 * 1024u)

static_assert(is_powerof2(MAX_DESCS_COUNT), "MAX_DESCS_COUNT must be a power of 2");
static_assert(is_powerof2(MIN_DESCS_COUNT), "MIN_DESCS_COUNT must be a power of 2");
static_assert(is_powerof2(DEFAULT_DESC_COUNT), "DEFAULT_DESC_COUNT must be a power of 2");
static_assert(DEFAULT_DESC_COUNT <= MAX_DESCS_COUNT && DEFAULT_DESC_COUNT >= MIN_DESCS_COUNT,
    "DEFAULT_DESC_COUNT not in range");

// From PLDA's vDMA controller reference:
// - Addresses of pages pointed to by vDMA descriptors need to be on a 64B boundary.
//   Hence, we require a minimum page size of 64B.
// - G_PAGE_SIZE_MAX dictates the maximum desc page size:
//   max_page_size = 2 ^ (G_PAGE_SIZE_MAX - 1)
//   In our case max_page_size = 2 ^ (13 - 1) = 4096
#define MIN_DESC_PAGE_SIZE (64u)
// TODO: Calculate from G_PAGE_SIZE_MAX (I.e. read the reg etc.)
#define MAX_DESC_PAGE_SIZE (4096u)
static constexpr uint16_t DEFAULT_DESC_PAGE_SIZE = 512;

static_assert(is_powerof2(MIN_DESC_PAGE_SIZE), "MIN_DESC_PAGE_SIZE must be a power of 2");
static_assert(MIN_DESC_PAGE_SIZE > 0, "MIN_DESC_PAGE_SIZE must be larger then 0");
static_assert(is_powerof2(MAX_DESC_PAGE_SIZE), "MAX_DESC_PAGE_SIZE must be a power of 2");
static_assert(MAX_DESC_PAGE_SIZE > 0, "MAX_DESC_PAGE_SIZE must be larger then 0");
static_assert(is_powerof2(DEFAULT_DESC_PAGE_SIZE), "DEFAULT_DESC_PAGE_SIZE must be a power of 2");
static_assert(DEFAULT_DESC_PAGE_SIZE > 0, "DEFAULT_DESC_PAGE_SIZE must be larger then 0");
+
+
// In-memory layout of a single vDMA descriptor (four 32-bit words), programmed directly
// by DescriptorList and read by the DMA engine.
struct VdmaDescriptor
{
    uint32_t PageSize_DescControl;      // Page size field + control bits (IRQ/status requests)
    uint32_t AddrL_rsvd_DataID;         // Presumably low address bits / reserved / data id - confirm against HW spec
    uint32_t AddrH;                     // Presumably high bits of the page address - confirm against HW spec
    uint32_t RemainingPageSize_Status;  // Status word; cleared by program_single_descriptor in debug builds
};
+
// Which side(s) should receive an interrupt when a descriptor completes.
enum class InterruptsDomain
{
    NONE = 0,
    DEVICE = 1 << 0,
    HOST = 1 << 1,
    BOTH = DEVICE | HOST
};

// True when the HOST bit is set in the domain.
// (Name spelling kept as-is for API compatibility.)
inline bool host_interuptes_enabled(InterruptsDomain interrupts_domain)
{
    const auto domain_bits = static_cast<uint32_t>(interrupts_domain);
    return (domain_bits & static_cast<uint32_t>(InterruptsDomain::HOST)) != 0;
}

// True when the DEVICE bit is set in the domain.
inline bool device_interuptes_enabled(InterruptsDomain interrupts_domain)
{
    const auto domain_bits = static_cast<uint32_t>(interrupts_domain);
    return (domain_bits & static_cast<uint32_t>(InterruptsDomain::DEVICE)) != 0;
}
+
// Owns a driver-allocated list of vDMA descriptors, mapped into user space so descriptors
// can be programmed directly. Move-only; the destructor unmaps and releases the list.
class DescriptorList
{
public:
    static Expected<DescriptorList> create(uint32_t desc_count, uint16_t requested_desc_page_size,
        HailoRTDriver &driver);

    ~DescriptorList();

    DescriptorList(const DescriptorList &other) = delete;
    DescriptorList &operator=(const DescriptorList &other) = delete;
    DescriptorList(DescriptorList &&other) noexcept;
    DescriptorList &operator=(DescriptorList &&other) = delete;

    // floor(log2(count)) - see calculate_desc_list_depth()
    uint8_t depth() const
    {
        return m_depth;
    }

    uint32_t count() const
    {
        return m_count;
    }

    uint64_t dma_address() const
    {
        return m_dma_address;
    }

    // Direct access to a descriptor in the mapped list (bounds-checked in debug builds only)
    VdmaDescriptor& operator[](size_t i)
    {
        assert(i < m_count);
        return m_mapped_list[i];
    }

    uint16_t desc_page_size() const
    {
        return m_desc_page_size;
    }

    // Driver-side handle of the descriptor list
    uintptr_t handle() const
    {
        return m_desc_handle;
    }

    uint16_t max_transfers(uint32_t transfer_size)
    {
        // We need to keep at least 1 free desc at all time.
        return static_cast<uint16_t>((m_count - 1) / descriptors_in_buffer(transfer_size));
    }

    // Map descriptors starting at offset to the start of buffer, wrapping around the descriptor list as needed
    // On hailo8, we allow configuring buffer without specific channel index (default is INVALID_VDMA_CHANNEL_INDEX).
    hailo_status configure_to_use_buffer(DmaMappedBuffer& buffer, ChannelId channel_id, uint32_t starting_desc = 0);
    // All descriptors are initialized to have size of m_desc_page_size - so all we do is set the last descriptor for
    // the interrupt - and then after the transfer has finished clear the previously used first and last descriptors.
    // This saves us writes/reads to the descriptor list, which is DMA memory.
    Expected<uint16_t> program_last_descriptor(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
        size_t desc_offset, bool is_circular);
    void program_single_descriptor(VdmaDescriptor &descriptor, uint16_t page_size, InterruptsDomain interrupts_domain);
    hailo_status reprogram_descriptor_interrupts_domain(size_t desc_index, InterruptsDomain interrupts_domain);
    void clear_descriptor(const size_t desc_index);

    uint32_t descriptors_in_buffer(size_t buffer_size) const;
    static uint32_t descriptors_in_buffer(size_t buffer_size, uint16_t desc_page_size);
    static uint32_t calculate_descriptors_count(uint32_t buffer_size, uint16_t batch_size, uint16_t desc_page_size);
    static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_single_transfer(const HailoRTDriver &driver,
        uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size);
    static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_multiple_transfers(const HailoRTDriver &driver,
        uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes);

private:
    DescriptorList(uint32_t desc_count, HailoRTDriver &driver, uint16_t desc_page_size, hailo_status &status);
    uint32_t get_interrupts_bitmask(InterruptsDomain interrupts_domain);
    void reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor, InterruptsDomain interrupts_domain);
    static Expected<uint8_t> calculate_desc_list_depth(size_t count);
    // Note: initial_desc_page_size should be the optimal descriptor page size.
    static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_single_transfer_impl(
        const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size,
        uint16_t initial_desc_page_size);
    static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_multiple_transfers_impl(
        const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes,
        uint16_t initial_desc_page_size);
    static uint32_t get_descriptors_count_needed(const std::vector<uint32_t> &transfer_sizes,
        uint16_t desc_page_size);

    MmapBuffer<VdmaDescriptor> m_mapped_list;  // User-space view of the driver-allocated descriptors
    uint32_t m_count;                          // Number of descriptors; always a power of 2
    uint8_t m_depth;                           // floor(log2(m_count))
    uintptr_t m_desc_handle;                   // Driver handle; 0 means "not owned" (moved-from)
    uint64_t m_dma_address;                    // DMA address of the list
    HailoRTDriver &m_driver;
    const uint16_t m_desc_page_size;
};
+
+} /* namespace vdma */
+} /* namespace hailort */
+
+#endif //_HAILO_VDMA_DESCRIPTOR_LIST_HPP_
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file vmda_mapped_buffer.cpp
+ * @brief Vdma mapped buffer implementation
+ **/
+
+#include "hailo/dma_mapped_buffer.hpp"
+
+#include "vdma/memory/mapped_buffer_impl.hpp"
+#include "vdma/vdma_device.hpp"
+
+
+namespace hailort {
+
+static Expected<HailoRTDriver::DmaDirection> convert_flags_to_driver_enum(hailo_vdma_buffer_direction_flags_t data_direction)
+{
+ static const auto BOTH_DIRECTIONS = HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D | HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H;
+ if ((data_direction & BOTH_DIRECTIONS) == BOTH_DIRECTIONS) {
+ return HailoRTDriver::DmaDirection::BOTH;
+ }
+
+ if ((data_direction & HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D) == HAILO_VDMA_BUFFER_DIRECTION_FLAGS_H2D) {
+ return HailoRTDriver::DmaDirection::H2D;
+ }
+
+ if ((data_direction & HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H) == HAILO_VDMA_BUFFER_DIRECTION_FLAGS_D2H) {
+ return HailoRTDriver::DmaDirection::D2H;
+ }
+
+ return make_unexpected(HAILO_INVALID_ARGUMENT);
+}
+
+// TODO: this should maybe be a vdevice (for mapping buffers to multiple devs)
+// TODO: a helper function for the cast to VdmaDevice
// Creates a mapped buffer whose memory is allocated internally: a null user address
// signals the private overload (and Impl::create) to allocate the backing memory.
Expected<DmaMappedBuffer> DmaMappedBuffer::create(size_t size,
    hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device)
{
    static const auto ALLOCATE_BUFFER = nullptr;
    return create(ALLOCATE_BUFFER, size, data_direction_flags, device);
}
+
// Creates a mapped buffer over caller-owned memory; the address must be non-null.
// The caller keeps ownership of the memory and must outlive the mapping.
Expected<DmaMappedBuffer> DmaMappedBuffer::create_from_user_address(void *user_address, size_t size,
    hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device)
{
    CHECK_ARG_NOT_NULL_AS_EXPECTED(user_address);
    return create(user_address, size, data_direction_flags, device);
}
+
+Expected<DmaMappedBuffer> DmaMappedBuffer::create(void *user_address, size_t size,
+ hailo_vdma_buffer_direction_flags_t data_direction_flags, Device &device)
+{
+ const auto device_type = device.get_type();
+ CHECK_AS_EXPECTED(((Device::Type::INTEGRATED == device_type) || (Device::Type::PCIE == device_type)),
+ HAILO_INVALID_ARGUMENT, "Invalid device type (expected integrated/pcie, received {})", device_type);
+ VdmaDevice *vdma_device = reinterpret_cast<VdmaDevice*>(&device);
+
+ auto data_direction = convert_flags_to_driver_enum(data_direction_flags);
+ CHECK_EXPECTED(data_direction, "Invalid direction flags received {}", data_direction_flags);
+
+ auto pimpl_exp = Impl::create(vdma_device->get_driver(), data_direction.release(), size, user_address);
+ CHECK_EXPECTED(pimpl_exp);
+
+ auto pimpl = make_unique_nothrow<Impl>(pimpl_exp.release());
+ CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+ return DmaMappedBuffer(std::move(pimpl));
+}
+
// Private constructor - takes ownership of the already-created implementation object.
DmaMappedBuffer::DmaMappedBuffer(std::unique_ptr<Impl> pimpl) :
    pimpl(std::move(pimpl))
{}
+
// Note: These can't be defined in the header due to the use of pimpl: Impl is incomplete
// there, and the compiler needs the complete type to generate the defaults.
DmaMappedBuffer::DmaMappedBuffer(DmaMappedBuffer &&other) noexcept = default;
DmaMappedBuffer::~DmaMappedBuffer() = default;
+
// User-space address of the mapped memory (delegates to the implementation).
void *DmaMappedBuffer::user_address()
{
    return pimpl->user_address();
}
+
// Size of the mapping in bytes (delegates to the implementation).
size_t DmaMappedBuffer::size() const
{
    return pimpl->size();
}
+
+hailo_status DmaMappedBuffer::synchronize()
+{
+ static constexpr auto BUFFER_START = 0;
+ return pimpl->synchronize(BUFFER_START, size());
+}
+
+} /* namespace hailort */
--- /dev/null
+/**\r
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file mapped_buffer_factory.cpp\r
+ * @brief Static utility class for creating DmaMappedBuffers internally in hailort\r
+ **/\r
+\r
+#include "vdma/memory/mapped_buffer_factory.hpp"\r
+#include "vdma/memory/mapped_buffer_impl.hpp"\r
+\r
+namespace hailort\r
+{\r
+namespace vdma\r
+{\r
+\r
// Internal factory: builds a DmaMappedBuffer directly over a driver reference, bypassing
// the public Device-based creation path. Memory is allocated by Impl::create (no user address).
Expected<DmaMappedBuffer> MappedBufferFactory::create_mapped_buffer(size_t size,
    HailoRTDriver::DmaDirection data_direction, HailoRTDriver &driver)
{
    auto pimpl_exp = DmaMappedBuffer::Impl::create(driver, data_direction, size);
    CHECK_EXPECTED(pimpl_exp);

    auto pimpl = make_unique_nothrow<DmaMappedBuffer::Impl>(pimpl_exp.release());
    CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
    return DmaMappedBuffer(std::move(pimpl));
}
+\r
+} /* namespace vdma */\r
+} /* namespace hailort */\r
--- /dev/null
+/**\r
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+**/\r
+/**\r
+ * @file mapped_buffer_factory.hpp\r
+ * @brief Static utility class for creating DmaMappedBuffers internally in hailort\r
+ **/\r
+\r
+#ifndef _HAILO_MAPPED_BUFFER_FACTORY_HPP_\r
+#define _HAILO_MAPPED_BUFFER_FACTORY_HPP_\r
+\r
+#include "hailo/hailort.h"\r
+#include "hailo/dma_mapped_buffer.hpp"\r
+#include "os/hailort_driver.hpp"\r
+\r
+namespace hailort\r
+{\r
+namespace vdma\r
+{\r
+\r
// Static utility for creating DmaMappedBuffers internally in hailort, directly from a
// driver reference (not instantiable).
class MappedBufferFactory
{
public:
    MappedBufferFactory() = delete;
    static Expected<DmaMappedBuffer> create_mapped_buffer(size_t size,
        HailoRTDriver::DmaDirection data_direction, HailoRTDriver &driver);
};
+\r
+} /* namespace vdma */\r
+} /* namespace hailort */\r
+\r
+#endif /* _HAILO_MAPPED_BUFFER_FACTORY_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file mapped_buffer_impl.cpp
+ * @brief Dma mapped buffer pimpl class implementation
+ **/
+#include "mapped_buffer_impl.hpp"
+
+namespace hailort {
+
+#if defined(__linux__) || defined(_MSC_VER)
+
// Creates an Impl, choosing one of three backing strategies:
// 1. user_address != nullptr: map caller-owned memory (nothing allocated here).
// 2. driver.allocate_driver_buffer(): allocate via the driver (vdma_low_memory_buffer_alloc)
//    and mmap the result into user space.
// 3. otherwise: allocate anonymous shared memory in user space.
Expected<DmaMappedBuffer::Impl> DmaMappedBuffer::Impl::create(HailoRTDriver &driver,
    HailoRTDriver::DmaDirection data_direction, size_t size, void *user_address)
{
    if (nullptr != user_address) {
        // User allocated buffer - create an empty MmapBuffer<void> (it doesn't hold the buffer)
        auto status = HAILO_UNINITIALIZED;
        auto result = DmaMappedBuffer::Impl(HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE, size,
            data_direction, user_address, MmapBuffer<void>(), driver, status);
        CHECK_SUCCESS_AS_EXPECTED(status);

        return result;
    } else if (driver.allocate_driver_buffer()) {
        // Allocate buffer via driver
        auto driver_buffer_handle = driver.vdma_low_memory_buffer_alloc(size);
        CHECK_EXPECTED(driver_buffer_handle);

        uintptr_t driver_buff_handle = driver_buffer_handle.release();

        // Map the driver allocation into this process; the Impl dtor frees the driver buffer.
        auto mapped_buffer = MmapBuffer<void>::create_file_map(size, driver.fd(), driver_buff_handle);
        CHECK_EXPECTED(mapped_buffer);

        auto status = HAILO_UNINITIALIZED;
        auto result = DmaMappedBuffer::Impl(driver_buff_handle, size, data_direction, mapped_buffer.release(),
            driver, status);
        CHECK_SUCCESS_AS_EXPECTED(status);

        return result;
    } else {
        // Standard userspace allocation
        auto mapped_buffer = MmapBuffer<void>::create_shared_memory(size);
        CHECK_EXPECTED(mapped_buffer);

        auto status = HAILO_UNINITIALIZED;
        auto result = DmaMappedBuffer::Impl(HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE, size,
            data_direction, mapped_buffer.release(), driver, status);
        CHECK_SUCCESS_AS_EXPECTED(status);

        return result;
    }
}
+
// Maps `user_address` for vdma through the driver, reporting failure via `status`.
// When a pre-mapped MmapBuffer is supplied, its address must equal user_address.
DmaMappedBuffer::Impl::Impl(vdma_mapped_buffer_driver_identifier driver_allocated_buffer_id,
    size_t size, HailoRTDriver::DmaDirection data_direction, void *user_address,
    MmapBuffer<void> &&mapped_buffer, HailoRTDriver &driver, hailo_status &status) :
    m_driver(driver),
    m_driver_allocated_buffer_id(driver_allocated_buffer_id),
    m_mapping_handle(HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE),
    m_mapped_buffer(std::move(mapped_buffer)),
    m_size(size),
    m_data_direction(data_direction),
    m_user_address(user_address)
{
    // Sanity: a mapped MmapBuffer must back the very address we are about to register.
    if (m_mapped_buffer.is_mapped() && (m_user_address != m_mapped_buffer.address())) {
        status = HAILO_INVALID_ARGUMENT;
        return;
    }

    auto expected_handle = driver.vdma_buffer_map(m_user_address, m_size, m_data_direction,
        m_driver_allocated_buffer_id);
    if (!expected_handle) {
        status = expected_handle.status();
        return;
    }

    m_mapping_handle = expected_handle.release();
    status = HAILO_SUCCESS;
}
+
// Convenience overload: the user address is taken from the mapped buffer itself.
DmaMappedBuffer::Impl::Impl(vdma_mapped_buffer_driver_identifier driver_allocated_buffer_id,
    size_t size, HailoRTDriver::DmaDirection data_direction,
    MmapBuffer<void> &&mapped_buffer, HailoRTDriver &driver, hailo_status &status) :
    Impl(driver_allocated_buffer_id, size, data_direction, mapped_buffer.address(), std::move(mapped_buffer), driver, status)
{}
+
+DmaMappedBuffer::Impl::Impl(Impl &&other) noexcept :
+ m_driver(other.m_driver),
+ m_driver_allocated_buffer_id(std::exchange(other.m_driver_allocated_buffer_id, HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE)),
+ m_mapping_handle(std::exchange(other.m_mapping_handle, HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE)),
+ m_mapped_buffer(std::move(other.m_mapped_buffer)),
+ m_size(std::move(other.m_size)),
+ m_data_direction(std::move(other.m_data_direction)),
+ m_user_address(std::move(other.m_user_address))
+{}
+
// Unmaps the vdma mapping first, then frees the driver-allocated buffer (if this Impl
// owns one). Sentinel values mark moved-from objects, which skip both steps.
DmaMappedBuffer::Impl::~Impl()
{
    if (HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE != m_mapping_handle) {
        m_driver.vdma_buffer_unmap(m_mapping_handle);
        m_mapping_handle = HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE;
    }

    if (HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE != m_driver_allocated_buffer_id) {
        m_driver.vdma_low_memory_buffer_free(m_driver_allocated_buffer_id);
        m_driver_allocated_buffer_id = HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE;
    }
}
+
// User-space address of the mapping.
void* DmaMappedBuffer::Impl::user_address()
{
    return m_user_address;
}
+
// Size of the mapping in bytes.
size_t DmaMappedBuffer::Impl::size() const
{
    return m_size;
}
+
// Driver-side handle of the vdma mapping (used e.g. by DescriptorList binding).
HailoRTDriver::VdmaBufferHandle DmaMappedBuffer::Impl::handle()
{
    return m_mapping_handle;
}
+
+hailo_status DmaMappedBuffer::Impl::synchronize(size_t offset, size_t count)
+{
+ CHECK(offset + count <= size(), HAILO_INVALID_ARGUMENT,
+ "Synchronizing {} bytes starting at offset {} will overflow (buffer size {})",
+ offset, count, size());
+ return m_driver.vdma_buffer_sync(m_mapping_handle, m_data_direction, offset, count);
+}
+
+#elif defined(__QNX__)
+
+#include <fcntl.h>
+
+const int DmaMappedBuffer::Impl::INVALID_FD = -1;
+const shm_handle_t DmaMappedBuffer::Impl::INVALID_HANDLE = (shm_handle_t)-1;
+const char* DmaMappedBuffer::Impl::VDMA_BUFFER_TYPE_MEMORY_NAME = "/memory/below4G/ram/below1G";
+
// QNX variant: allocates a DMA-able buffer through typed memory + anonymous shared memory,
// maps it into this process and registers it with the driver. Cleanup on each failure path
// is manual and must undo exactly the steps taken so far (handle -> mmap -> fd).
Expected<DmaMappedBuffer::Impl> DmaMappedBuffer::Impl::create(HailoRTDriver &driver,
    HailoRTDriver::DmaDirection data_direction, size_t size, void *user_address)
{
    // TODO: HRT-9508
    CHECK_AS_EXPECTED(user_address == nullptr, HAILO_NOT_IMPLEMENTED, "User allocated buffers not supported on qnx");

    // Destructor of type_mem_fd will close fd
    FileDescriptor type_mem_fd(posix_typed_mem_open(VDMA_BUFFER_TYPE_MEMORY_NAME, O_RDWR, POSIX_TYPED_MEM_ALLOCATE));
    if (INVALID_FD == type_mem_fd) {
        LOGGER__ERROR("Error getting fd from typed memory of type {}, errno {}\n", VDMA_BUFFER_TYPE_MEMORY_NAME,
            errno);
        return make_unexpected(HAILO_INTERNAL_FAILURE);
    }

    vdma_mapped_buffer_driver_identifier driver_buff_handle;
    driver_buff_handle.shm_fd = shm_open(SHM_ANON, O_RDWR | O_CREAT, 0777);
    CHECK_AS_EXPECTED(INVALID_FD != driver_buff_handle.shm_fd, HAILO_INTERNAL_FAILURE,
        "Error creating shm object, errno is: {}", errno);

    // backs the shared memory object with physical memory
    int err = shm_ctl(driver_buff_handle.shm_fd, SHMCTL_ANON | SHMCTL_TYMEM, (uint64_t)type_mem_fd,
        size);
    if (-1 == err) {
        LOGGER__ERROR("Error backing shm object in physical memory, errno is: {}", errno);
        close(driver_buff_handle.shm_fd);
        return make_unexpected(HAILO_INTERNAL_FAILURE);
    }

    // Create shared memory handle to send to driver
    err = shm_create_handle(driver_buff_handle.shm_fd, driver.resource_manager_pid(), O_RDWR,
        &driver_buff_handle.shm_handle, 0);
    if (0 != err) {
        LOGGER__ERROR("Error creating shm object handle, errno is: {}", errno);
        close(driver_buff_handle.shm_fd);
        return make_unexpected(HAILO_INTERNAL_FAILURE);
    }

    void *address = mmap(0, size, PROT_WRITE | PROT_READ | PROT_NOCACHE, MAP_SHARED, driver_buff_handle.shm_fd, 0);
    if (MAP_FAILED == address) {
        LOGGER__ERROR("Failed to mmap buffer with errno:{}", errno);
        shm_delete_handle(driver_buff_handle.shm_handle);
        close(driver_buff_handle.shm_fd);
        return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
    }

    hailo_status status = HAILO_UNINITIALIZED;
    auto result = DmaMappedBuffer::Impl(address, size, data_direction, driver_buff_handle.shm_handle,
        driver_buff_handle.shm_fd, driver, status);
    if (HAILO_SUCCESS != status) {
        LOGGER__ERROR("Failed to map buffer to vdma");
        munmap(address, size);
        shm_delete_handle(driver_buff_handle.shm_handle);
        close(driver_buff_handle.shm_fd);
        return make_unexpected(status);
    }

    return result;
}
+
+// QNX ctor: takes ownership of an already mmap-ed, shm-backed region and maps it to vdma.
+// On failure 'status' is set and ownership of the region/fd is relinquished, so the caller
+// (create()) performs the cleanup exactly once.
+DmaMappedBuffer::Impl::Impl(void *addr, size_t size, HailoRTDriver::DmaDirection data_direction,
+    shm_handle_t shm_handle, int shm_fd, HailoRTDriver &driver, hailo_status &status) :
+    m_driver(driver),
+    m_address(addr),
+    m_size(size),
+    m_data_direction(data_direction),
+    // Initialize to INVALID so the destructor is well-defined even if the mapping below fails
+    // (previously left uninitialized on the failure path).
+    m_mapping_handle(HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE)
+{
+    m_driver_allocated_buffer_id.shm_handle = shm_handle;
+    m_driver_allocated_buffer_id.shm_fd = shm_fd;
+
+    auto expected_handle = driver.vdma_buffer_map(addr, size, data_direction, m_driver_allocated_buffer_id);
+    if (!expected_handle) {
+        status = expected_handle.status();
+        // Relinquish ownership: create() munmaps/closes on failure, and keeping these members set
+        // would make our destructor free the same resources a second time (double munmap/close).
+        m_address = nullptr;
+        m_driver_allocated_buffer_id.shm_handle = INVALID_HANDLE;
+        m_driver_allocated_buffer_id.shm_fd = INVALID_FD;
+        return;
+    }
+
+    m_mapping_handle = expected_handle.release();
+    status = HAILO_SUCCESS;
+}
+
+// Move ctor: transfers ownership of the mapped address, vdma mapping handle and shm
+// identifiers, leaving 'other' in a state whose destructor is a no-op.
+DmaMappedBuffer::Impl::Impl(Impl &&other) noexcept :
+    m_driver(other.m_driver),
+    m_address(std::exchange(other.m_address, nullptr)),
+    // m_size / m_data_direction are const scalars - std::move on them was just a copy in
+    // disguise (clang-tidy: performance-move-const-arg), so copy them plainly.
+    m_size(other.m_size),
+    m_data_direction(other.m_data_direction),
+    m_mapping_handle(std::exchange(other.m_mapping_handle, HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE))
+{
+    m_driver_allocated_buffer_id.shm_handle = std::exchange(other.m_driver_allocated_buffer_id.shm_handle, INVALID_HANDLE);
+    m_driver_allocated_buffer_id.shm_fd = std::exchange(other.m_driver_allocated_buffer_id.shm_fd, INVALID_FD);
+}
+
+// Releases resources in order: driver vdma mapping, mmap-ed virtual range, shm fd.
+// NOTE(review): the shm handle itself is not deleted here - presumably it is consumed by the
+// driver when the buffer is mapped; confirm against the driver's vdma_buffer_map contract.
+DmaMappedBuffer::Impl::~Impl()
+{
+    if (HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE != m_mapping_handle) {
+        m_driver.vdma_buffer_unmap(m_mapping_handle);
+        m_mapping_handle = HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE;
+    }
+
+    // m_address is nullptr when this instance was moved-from.
+    if (nullptr != m_address) {
+        if (0 != munmap(m_address, m_size)) {
+            LOGGER__ERROR("Error unmapping memory at address {}, Errno: {}", m_address, errno);
+        }
+    }
+
+    if (INVALID_FD != m_driver_allocated_buffer_id.shm_fd) {
+        if (0 != close(m_driver_allocated_buffer_id.shm_fd)) {
+            LOGGER__ERROR("Error closing shared memory fd, Errno: {}", errno);
+        }
+    }
+}
+
+// Host-visible (mmap-ed) address of the buffer; valid for this object's lifetime.
+void* DmaMappedBuffer::Impl::user_address()
+{
+    return m_address;
+}
+// Buffer size in bytes.
+size_t DmaMappedBuffer::Impl::size() const
+{
+    return m_size;
+}
+
+// Driver-side vdma mapping handle (INVALID after a move or failed construction).
+HailoRTDriver::VdmaBufferHandle DmaMappedBuffer::Impl::handle()
+{
+    return m_mapping_handle;
+}
+
+// Synchronizes 'count' bytes of the mapping starting at 'offset' with the device
+// (direction per m_data_direction). Fails if the range exceeds the buffer.
+hailo_status DmaMappedBuffer::Impl::synchronize(size_t offset, size_t count)
+{
+    // Overflow-safe form of 'offset + count <= size()' (the sum could wrap for huge values).
+    // Also fixes the log arguments, which were swapped (offset printed as the byte count).
+    CHECK((offset <= size()) && (count <= size() - offset), HAILO_INVALID_ARGUMENT,
+        "Synchronizing {} bytes starting at offset {} will overflow (buffer size {})",
+        count, offset, size());
+    return m_driver.vdma_buffer_sync(m_mapping_handle, m_data_direction, offset, count);
+}
+
+#else
+#error "unsupported platform!"
+#endif // defined(__linux__) || defined(_MSC_VER)
+
+} /* namespace hailort */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file mapped_buffer_impl.hpp
+ * @brief Vdma mapped buffer pimpl class defintion
+ **/
+#ifndef _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_
+#define _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_
+
+#include "hailo/dma_mapped_buffer.hpp"
+#include "os/mmap_buffer.hpp"
+#include "os/hailort_driver.hpp"
+#include "hailo/expected.hpp"
+
+namespace hailort {
+
+#if defined(__linux__) || defined(_MSC_VER)
+
+// Linux/Windows pimpl of DmaMappedBuffer: owns an mmap-ed buffer and its vdma mapping.
+class DmaMappedBuffer::Impl final {
+public:
+    // If user_address is nullptr, a buffer of size 'size' will be allocated and mapped to dma in 'data_direction'
+    // Otherwise, the buffer pointed to by user_address will be mapped to dma in 'data_direction'
+    static Expected<Impl> create(HailoRTDriver &driver, HailoRTDriver::DmaDirection data_direction,
+        size_t size, void *user_address = nullptr);
+
+    // Movable (ownership transfer) but not copyable - the instance owns the dma mapping.
+    Impl(Impl &&other) noexcept;
+    Impl(const Impl &other) = delete;
+    Impl &operator=(const Impl &other) = delete;
+    Impl &operator=(Impl &&other) = delete;
+    ~Impl();
+
+    // Host-visible address of the buffer.
+    void* user_address();
+    // Buffer size in bytes.
+    size_t size() const;
+    // Driver-side handle of the vdma mapping.
+    HailoRTDriver::VdmaBufferHandle handle();
+    // Syncs [offset, offset + count) with the device.
+    // TODO: validate that offset is cache aligned (HRT-9811)
+    hailo_status synchronize(size_t offset, size_t count);
+
+private:
+    // Two private ctors - one receiving an explicit user address and one without;
+    // both report construction failure through 'status'.
+    Impl(vdma_mapped_buffer_driver_identifier driver_allocated_buffer_id, size_t size,
+        HailoRTDriver::DmaDirection data_direction, void *user_address, MmapBuffer<void> &&mapped_buffer,
+        HailoRTDriver &driver, hailo_status &status);
+    Impl(vdma_mapped_buffer_driver_identifier driver_allocated_buffer_id, size_t size,
+        HailoRTDriver::DmaDirection data_direction, MmapBuffer<void> &&mapped_buffer, HailoRTDriver &driver,
+        hailo_status &status);
+
+    HailoRTDriver &m_driver;
+    vdma_mapped_buffer_driver_identifier m_driver_allocated_buffer_id;
+    HailoRTDriver::VdmaBufferHandle m_mapping_handle;
+    MmapBuffer<void> m_mapped_buffer;
+    const size_t m_size;
+    const HailoRTDriver::DmaDirection m_data_direction;
+    void *const m_user_address;
+};
+
+#elif defined(__QNX__)
+
+// TODO: merge qnx and non-qnx impls (HRT-9508)
+// QNX pimpl of DmaMappedBuffer: backs the buffer with a typed-memory shm object
+// (see the .cpp - posix_typed_mem_open/shm_ctl) instead of MmapBuffer.
+// TODO: merge qnx and non-qnx impls (HRT-9508)
+class DmaMappedBuffer::Impl final {
+public:
+    static Expected<Impl> create(HailoRTDriver &driver, HailoRTDriver::DmaDirection data_direction,
+        size_t size, void *user_address = nullptr);
+
+    // Movable only - the instance owns the mmap-ed region, shm fd and vdma mapping.
+    Impl(const Impl &other) = delete;
+    Impl &operator=(const Impl &other) = delete;
+    Impl &operator=(Impl &&other) = delete;
+    Impl(Impl &&other) noexcept;
+    ~Impl();
+
+    // Host-visible address of the buffer.
+    void* user_address();
+    // Buffer size in bytes.
+    size_t size() const;
+    // Driver-side handle of the vdma mapping.
+    HailoRTDriver::VdmaBufferHandle handle();
+    // Syncs [offset, offset + count) with the device.
+    hailo_status synchronize(size_t offset, size_t count);
+
+private:
+    Impl(void *addr, size_t size, HailoRTDriver::DmaDirection data_direction,
+        shm_handle_t shm_handle, int shm_fd, HailoRTDriver &driver, hailo_status &status);
+
+    // Sentinels for "no resource" (values defined in the .cpp).
+    static const int INVALID_FD;
+    static const shm_handle_t INVALID_HANDLE;
+    static const char* VDMA_BUFFER_TYPE_MEMORY_NAME;
+
+    HailoRTDriver &m_driver;
+    void *m_address;
+    const size_t m_size;
+    const HailoRTDriver::DmaDirection m_data_direction;
+    vdma_mapped_buffer_driver_identifier m_driver_allocated_buffer_id;
+    HailoRTDriver::VdmaBufferHandle m_mapping_handle;
+};
+
+#else
+#error "unsupported platform!"
+#endif // defined(__linux__) || defined(_MSC_VER)
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_sg_buffer.cpp
+ * @brief Scatter-gather vdma buffer.
+ **/
+
+#include "vdma/memory/sg_buffer.hpp"
+#include "vdma/channel/channel_id.hpp"
+#include "vdma/memory/mapped_buffer_factory.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+// Creates an SgBuffer: a dma-mapped data buffer plus a descriptor list configured to point
+// into it. 'size' must fit in desc_count pages of desc_page_size and be page-size aligned.
+Expected<SgBuffer> SgBuffer::create(HailoRTDriver &driver, size_t size, uint32_t desc_count, uint16_t desc_page_size,
+    HailoRTDriver::DmaDirection data_direction, ChannelId channel_id)
+{
+    // Compute in size_t - the old uint32_t product could wrap for large desc_count,
+    // which also made the assert below vacuously true.
+    const size_t total_descs_size = static_cast<size_t>(desc_count) * desc_page_size;
+    CHECK_AS_EXPECTED(size <= total_descs_size, HAILO_INTERNAL_FAILURE,
+        "Requested buffer size {} must not be larger than {}", size, total_descs_size);
+    CHECK_AS_EXPECTED((size % desc_page_size) == 0, HAILO_INTERNAL_FAILURE,
+        "SgBuffer size must be a multiple of descriptors page size (size {})", size);
+
+    auto mapped_buffer_exp = MappedBufferFactory::create_mapped_buffer(size,
+        data_direction, driver);
+    CHECK_EXPECTED(mapped_buffer_exp);
+
+    auto mapped_buffer = make_shared_nothrow<DmaMappedBuffer>(mapped_buffer_exp.release());
+    CHECK_NOT_NULL_AS_EXPECTED(mapped_buffer, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto desc_list_exp = DescriptorList::create(desc_count, desc_page_size, driver);
+    CHECK_EXPECTED(desc_list_exp);
+
+    auto desc_list = make_shared_nothrow<DescriptorList>(desc_list_exp.release());
+    CHECK_NOT_NULL_AS_EXPECTED(desc_list, HAILO_OUT_OF_HOST_MEMORY);
+
+    // The total descriptors size is used as a uint32_t elsewhere; now checked meaningfully.
+    assert(total_descs_size <= std::numeric_limits<uint32_t>::max());
+
+    auto status = desc_list->configure_to_use_buffer(*mapped_buffer, channel_id);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return SgBuffer(mapped_buffer, desc_list);
+}
+
+// Takes shared ownership of the mapped buffer and its descriptor list.
+// Member order matters: the descriptor list points into the mapped buffer, so it is
+// declared after it and therefore destroyed first (see sg_buffer.hpp).
+SgBuffer::SgBuffer(std::shared_ptr<DmaMappedBuffer> mapped_buffer, std::shared_ptr<DescriptorList> desc_list) :
+    m_mapped_buffer(mapped_buffer),
+    m_desc_list(desc_list)
+{}
+
+// Size in bytes of the underlying mapped data buffer.
+size_t SgBuffer::size() const
+{
+    return m_mapped_buffer->size();
+}
+
+// Dma address of the descriptor list (this is what the hw is given, not the data buffer).
+uint64_t SgBuffer::dma_address() const
+{
+    return m_desc_list->dma_address();
+}
+
+uint16_t SgBuffer::desc_page_size() const
+{
+    return m_desc_list->desc_page_size();
+}
+
+uint32_t SgBuffer::descs_count() const
+{
+    return static_cast<uint32_t>(m_desc_list->count());
+}
+
+uint8_t SgBuffer::depth() const
+{
+    return m_desc_list->depth();
+}
+
+std::shared_ptr<DescriptorList> SgBuffer::get_desc_list()
+{
+    return m_desc_list;
+}
+
+// Host-visible address of the data buffer.
+// TODO: Remove after HRT-7838
+void* SgBuffer::get_user_address()
+{
+    return m_mapped_buffer->user_address();
+}
+
+// Copies 'count' bytes at 'offset' from the dma buffer into buf_dst, optionally syncing
+// the mapping with the device first so the host sees the device's latest writes.
+hailo_status SgBuffer::read(void *buf_dst, size_t count, size_t offset, bool should_sync)
+{
+    // Overflow-safe form of 'count + offset <= size' (the sum could wrap).
+    CHECK((offset <= m_mapped_buffer->size()) && (count <= m_mapped_buffer->size() - offset),
+        HAILO_INSUFFICIENT_BUFFER);
+    if (count == 0) {
+        return HAILO_SUCCESS;
+    }
+
+    if (should_sync) {
+        const auto status = m_mapped_buffer->synchronize();
+        CHECK_SUCCESS(status, "Failed synching SgBuffer buffer on read");
+    }
+
+    const auto src_addr = static_cast<uint8_t*>(m_mapped_buffer->user_address()) + offset;
+    memcpy(buf_dst, src_addr, count);
+
+    return HAILO_SUCCESS;
+}
+// Copies 'count' bytes from buf_src into the dma buffer at 'offset', then syncs the
+// mapping so the device observes the new data.
+hailo_status SgBuffer::write(const void *buf_src, size_t count, size_t offset)
+{
+    // Overflow-safe form of 'count + offset <= size' (the sum could wrap).
+    CHECK((offset <= m_mapped_buffer->size()) && (count <= m_mapped_buffer->size() - offset),
+        HAILO_INSUFFICIENT_BUFFER);
+    if (count == 0) {
+        return HAILO_SUCCESS;
+    }
+
+    const auto dst_addr = static_cast<uint8_t*>(m_mapped_buffer->user_address()) + offset;
+    std::memcpy(dst_addr, buf_src, count);
+
+    const auto status = m_mapped_buffer->synchronize();
+    CHECK_SUCCESS(status, "Failed synching SgBuffer buffer on write");
+
+    return HAILO_SUCCESS;
+}
+
+// Programs a transfer of 'transfer_size' bytes starting at descriptor 'desc_offset';
+// 'last_desc_interrupts_domain' controls the interrupt raised by the final descriptor.
+// Delegates to DescriptorList::program_last_descriptor (presumably returning the number
+// of descriptors used - confirm against DescriptorList's contract).
+Expected<uint32_t> SgBuffer::program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
+    size_t desc_offset, bool is_circular)
+{
+    return m_desc_list->program_last_descriptor(transfer_size, last_desc_interrupts_domain, desc_offset, is_circular);
+}
+
+// Moves the interrupt of the last descriptor in a batch of 'batch_size' transfers
+// (each 'transfer_size' bytes) to 'new_interrupts_domain'.
+hailo_status SgBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
+    InterruptsDomain new_interrupts_domain)
+{
+    const auto desc_per_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
+    const auto num_desc_in_batch = desc_per_transfer * batch_size;
+    // Guard: with batch_size == 0 (or an empty transfer) 'num_desc_in_batch - 1' would
+    // underflow to a huge index.
+    CHECK(num_desc_in_batch > 0, HAILO_INVALID_ARGUMENT,
+        "Can't reprogram end-of-batch interrupt for an empty batch");
+    const auto last_desc_index_in_batch = num_desc_in_batch - 1;
+    return m_desc_list->reprogram_descriptor_interrupts_domain(last_desc_index_in_batch, new_interrupts_domain);
+}
+
+}
+}
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file sg_buffer.hpp
+ * @brief Scatter-gather vdma buffer: from the user-mode point of view the buffer is contiguous,
+ *        but not from the physical-memory point of view.
+ *        The sg buffer consists of 2 parts:
+ *              - DmaMappedBuffer - the actual buffer that stores the data.
+ *              - Descriptors list - each descriptor points to a single "dma page" in the DmaMappedBuffer.
+ *        The hw accepts the descriptors list address and parses it to get the actual data.
+ **/
+
+#ifndef _HAILO_VDMA_SG_BUFFER_HPP_
+#define _HAILO_VDMA_SG_BUFFER_HPP_
+
+#include "hailo/dma_mapped_buffer.hpp"
+
+#include "os/hailort_driver.hpp"
+#include "vdma/memory/vdma_buffer.hpp"
+#include "vdma/memory/descriptor_list.hpp"
+
+
+namespace hailort {
+namespace vdma {
+
+// Scatter-gather vdma buffer: a dma-mapped data buffer plus the descriptor list the hw
+// uses to address it page by page.
+class SgBuffer final : public VdmaBuffer {
+public:
+    static Expected<SgBuffer> create(HailoRTDriver &driver, size_t size, uint32_t desc_count, uint16_t desc_page_size,
+        HailoRTDriver::DmaDirection data_direction, vdma::ChannelId channel_id);
+
+    virtual ~SgBuffer() = default;
+
+    // Movable only - the parts are shared_ptr-held, but copying a VdmaBuffer is disallowed.
+    SgBuffer(const SgBuffer &) = delete;
+    SgBuffer(SgBuffer &&) = default;
+    SgBuffer& operator=(const SgBuffer &) = delete;
+    SgBuffer& operator=(SgBuffer &&) = delete;
+
+    virtual Type type() const override
+    {
+        return Type::SCATTER_GATHER;
+    }
+
+    virtual size_t size() const override;
+    // Dma address of the descriptor list (what the hw is programmed with).
+    virtual uint64_t dma_address() const override;
+    virtual uint16_t desc_page_size() const override;
+    virtual uint32_t descs_count() const override;
+    uint8_t depth() const;
+
+    std::shared_ptr<DescriptorList> get_desc_list();
+    // Host-visible address of the data buffer.
+    // TODO: Remove after HRT-7838
+    void *get_user_address();
+
+    virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync) override;
+    virtual hailo_status write(const void *buf_src, size_t count, size_t offset) override;
+
+    virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
+        size_t desc_offset, bool is_circular) override;
+    virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
+        InterruptsDomain new_interrupts_domain) override;
+
+private:
+    SgBuffer(std::shared_ptr<DmaMappedBuffer> mapped_buffer, std::shared_ptr<DescriptorList> desc_list);
+
+    // Initialization Dependency: The descriptor list points into the mapped buffer so it must be freed before it
+    std::shared_ptr<DmaMappedBuffer> m_mapped_buffer;
+    std::shared_ptr<DescriptorList> m_desc_list;
+};
+
+} /* vdma */
+} /* hailort */
+
+#endif /* _HAILO_VDMA_SG_BUFFER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_buffer.cpp
+ * @brief vdma buffer.
+ **/
+
+#include "vdma_buffer.hpp"
+#include "control_protocol.h"
+
+namespace hailort {
+namespace vdma {
+
+// Builds the control-protocol buffer descriptor for this buffer and a given transfer size.
+CONTROL_PROTOCOL__host_buffer_info_t VdmaBuffer::get_host_buffer_info(uint32_t transfer_size)
+{
+    return get_host_buffer_info(type(), dma_address(), desc_page_size(), descs_count(), transfer_size);
+}
+
+// Static variant: SCATTER_GATHER maps to an external-descriptors buffer type, anything
+// else (i.e. CONTINUOUS) to CCB.
+CONTROL_PROTOCOL__host_buffer_info_t VdmaBuffer::get_host_buffer_info(Type type, uint64_t dma_address,
+    uint16_t desc_page_size, uint32_t desc_count, uint32_t transfer_size)
+{
+    CONTROL_PROTOCOL__host_buffer_info_t buffer_info{};
+    buffer_info.buffer_type = static_cast<uint8_t>((type == vdma::VdmaBuffer::Type::SCATTER_GATHER) ?
+        CONTROL_PROTOCOL__HOST_BUFFER_TYPE_EXTERNAL_DESC :
+        CONTROL_PROTOCOL__HOST_BUFFER_TYPE_CCB);
+    buffer_info.dma_address = dma_address;
+    buffer_info.desc_page_size = desc_page_size;
+    buffer_info.total_desc_count = desc_count;
+    buffer_info.bytes_in_pattern = transfer_size;
+
+    return buffer_info;
+}
+
+}
+}
\ No newline at end of file
--- /dev/null
+/**\r
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.\r
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
+ **/\r
+/**\r
+ * @file vdma_buffer.hpp\r
+ * @brief Abstract layer representing a vdma buffer (buffer that can be read/written to the device over vdma.)\r
+ * The buffer can be either non-continuous with attach descriptors list (SgBuffer) or continuous buffer.\r
+ **/\r
+\r
+#ifndef _HAILO_VDMA_VDMA_BUFFER_HPP_\r
+#define _HAILO_VDMA_VDMA_BUFFER_HPP_\r
+\r
+#include "os/hailort_driver.hpp"\r
+#include "vdma/memory/descriptor_list.hpp"\r
+#include "control_protocol.h"\r
+\r
+\r
+namespace hailort {\r
+namespace vdma {\r
+\r
+class VdmaBuffer {\r
+public:\r
+\r
+ enum class Type {\r
+ SCATTER_GATHER,\r
+ CONTINUOUS\r
+ };\r
+\r
+ virtual ~VdmaBuffer() = default;\r
+\r
+ VdmaBuffer() = default;\r
+ VdmaBuffer(const VdmaBuffer &) = delete;\r
+ VdmaBuffer(VdmaBuffer &&) = default;\r
+ VdmaBuffer& operator=(const VdmaBuffer &) = delete;\r
+ VdmaBuffer& operator=(VdmaBuffer &&) = delete;\r
+\r
+ virtual Type type() const = 0;\r
+ virtual size_t size() const = 0;\r
+ virtual uint64_t dma_address() const = 0;\r
+ virtual uint16_t desc_page_size() const = 0;\r
+ virtual uint32_t descs_count() const = 0;\r
+\r
+ uint32_t descriptors_in_buffer(size_t buffer_size) const\r
+ {\r
+ assert(buffer_size < std::numeric_limits<uint32_t>::max());\r
+ const auto page_size = desc_page_size();\r
+ return static_cast<uint32_t>(DIV_ROUND_UP(buffer_size, page_size));\r
+ }\r
+\r
+ virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync = true) = 0;\r
+ virtual hailo_status write(const void *buf_src, size_t count, size_t offset) = 0;\r
+\r
+ virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,\r
+ size_t desc_offset, bool is_circular) = 0;\r
+ virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,\r
+ InterruptsDomain new_interrupts_domain) = 0;\r
+ \r
+ CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info(uint32_t transfer_size);\r
+ static CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info(Type type, uint64_t dma_address,\r
+ uint16_t desc_page_size, uint32_t total_desc_count, uint32_t transfer_size);\r
+};\r
+\r
+} /* vdma */\r
+} /* hailort */\r
+\r
+#endif /* _HAILO_VDMA_VDMA_BUFFER_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pcie_device.cpp
+ * @brief Implementation of PcieDevice - scanning, creation, device-id parsing/formatting
+ *        and reset of Hailo devices connected over PCIe.
+ **/
+
+#include "hailo/hailort.h"
+#include "hailo/device.hpp"
+#include "hailo/hef.hpp"
+
+#include "common/utils.hpp"
+#include "common/compiler_extensions_compat.hpp"
+
+#include "vdma/pcie/pcie_device.hpp"
+#include "device_common/control.hpp"
+#include "os/hailort_driver.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+#include "vdma/vdma_config_manager.hpp"
+
+#include <new>
+#include <algorithm>
+
+
+namespace hailort
+{
+
+// Scans for Hailo PCIe devices via the driver and returns the parsed device infos.
+// Scan results whose id fails to parse are skipped silently.
+Expected<std::vector<hailo_pcie_device_info_t>> PcieDevice::scan()
+{
+    auto scan_results = HailoRTDriver::scan_devices();
+    CHECK_EXPECTED(scan_results);
+
+    std::vector<hailo_pcie_device_info_t> out_results;
+    out_results.reserve(scan_results->size());
+    for (const auto &scan_result : scan_results.value()) {
+        // parse_pcie_device_info takes 'log_on_failure' - pass false so expected parse
+        // failures during a scan stay silent (the old DONT_LOG_ON_FAILURE=true constant
+        // inverted the flag and logged on every failure).
+        const bool LOG_ON_FAILURE = false;
+        auto device_info = parse_pcie_device_info(scan_result.device_id, LOG_ON_FAILURE);
+        if (device_info) {
+            out_results.emplace_back(device_info.release());
+        }
+    }
+
+    return out_results;
+}
+
+// Creates a PcieDevice when exactly one PCIe device is present on the host.
+// (The old "Take the first device" comment was misleading - multiple devices are rejected.)
+Expected<std::unique_ptr<PcieDevice>> PcieDevice::create()
+{
+    auto scan_result = scan();
+    CHECK_EXPECTED(scan_result, "Failed scanning pcie devices");
+    CHECK_AS_EXPECTED(scan_result->size() == 1, HAILO_INVALID_OPERATION,
+        "Expected only 1 PCIe device. Pass `hailo_pcie_device_info_t` to create a specific PCIe device");
+    return create(scan_result->at(0));
+}
+
+// Creates the PcieDevice matching 'pcie_device_info' (bus/device/func, optional domain).
+// Construction failures inside the ctor are reported through 'status'.
+Expected<std::unique_ptr<PcieDevice>> PcieDevice::create(const hailo_pcie_device_info_t &pcie_device_info)
+{
+    auto device_info = find_device_info(pcie_device_info);
+    CHECK_EXPECTED(device_info);
+
+    auto pcie_device_info_str = pcie_device_info_to_string(pcie_device_info);
+    CHECK_EXPECTED(pcie_device_info_str);
+
+    auto driver = HailoRTDriver::create(device_info->dev_path);
+    CHECK_EXPECTED(driver);
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto device = std::unique_ptr<PcieDevice>(new (std::nothrow) PcieDevice(driver.release(), pcie_device_info, status,
+        pcie_device_info_str.release()));
+    CHECK_AS_EXPECTED((nullptr != device), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating PcieDevice");
+    return device;
+}
+
+// same format as in lspci - [<domain>].<bus>.<device>.<func>
+// domain (0 to ffff) bus (0 to ff), device (0 to 1f) and function (0 to 7).
+// Short form example: "01:00.0" (7 chars, no domain)
+static const char *DEVICE_ID_STRING_FMT_SHORT = "%02x:%02x.%d";
+static constexpr int DEVICE_ID_ELEMENTS_COUNT_SHORT = 3;
+static constexpr int DEVICE_ID_STRING_LENGTH_SHORT = 7; // Length without null terminator
+
+// Long form example: "0000:01:00.0" (12 chars, with domain)
+static const char *DEVICE_ID_STRING_FMT_LONG = "%04x:%02x:%02x.%d";
+static constexpr int DEVICE_ID_ELEMENTS_COUNT_LONG = 4;
+static constexpr int DEVICE_ID_STRING_LENGTH_LONG = 12; // Length without null terminator
+
+static constexpr int DEVICE_ID_MAX_STRING_LENGTH = std::max(DEVICE_ID_STRING_LENGTH_SHORT, DEVICE_ID_STRING_LENGTH_LONG);
+
+// Parses a device id string in the long form "<domain>:<bus>:<device>.<func>" or the
+// short form "<bus>:<device>.<func>" (domain defaults to HAILO_PCIE_ANY_DOMAIN).
+// 'log_on_failure' suppresses the error log when false (used while scanning).
+Expected<hailo_pcie_device_info_t> PcieDevice::parse_pcie_device_info(const std::string &device_info_str,
+    bool log_on_failure)
+{
+    hailo_pcie_device_info_t device_info{};
+    // Try the long (domain-included) format first; on a short-form string sscanf stops
+    // early and returns fewer than DEVICE_ID_ELEMENTS_COUNT_LONG matches.
+    int scanf_res = sscanf(device_info_str.c_str(), DEVICE_ID_STRING_FMT_LONG,
+        &device_info.domain, &device_info.bus, &device_info.device, &device_info.func);
+    if (DEVICE_ID_ELEMENTS_COUNT_LONG != scanf_res) {
+        // Domain not included, trying short
+        device_info.domain = HAILO_PCIE_ANY_DOMAIN;
+        scanf_res = sscanf(device_info_str.c_str(), DEVICE_ID_STRING_FMT_SHORT,
+            &device_info.bus, &device_info.device, &device_info.func);
+        if (DEVICE_ID_ELEMENTS_COUNT_SHORT != scanf_res) {
+            if (log_on_failure) {
+                LOGGER__ERROR("Invalid device info string (format is [<domain>].<bus>.<device>.<func>) {}", device_info_str);
+            }
+            return make_unexpected(HAILO_INVALID_ARGUMENT);
+        }
+    }
+
+    return device_info;
+}
+
+// Formats 'device_info' back into the lspci-style id string; the long form is used only
+// when an explicit domain is set.
+Expected<std::string> PcieDevice::pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info)
+{
+    char device_string[DEVICE_ID_MAX_STRING_LENGTH + 1] = { 0 };
+
+    if (HAILO_PCIE_ANY_DOMAIN != device_info.domain) {
+        int res = snprintf(device_string, DEVICE_ID_STRING_LENGTH_LONG + 1, DEVICE_ID_STRING_FMT_LONG,
+            device_info.domain, device_info.bus, device_info.device, device_info.func);
+        // If the user passes an invalid device_info in a release build, the check fails here.
+        CHECK_AS_EXPECTED((DEVICE_ID_STRING_LENGTH_LONG) == res, HAILO_INVALID_ARGUMENT, "Invalid device info");
+    }
+    else {
+        int res = snprintf(device_string, DEVICE_ID_STRING_LENGTH_SHORT + 1, DEVICE_ID_STRING_FMT_SHORT,
+            device_info.bus, device_info.device, device_info.func);
+        // If the user passes an invalid device_info in a release build, the check fails here.
+        CHECK_AS_EXPECTED((DEVICE_ID_STRING_LENGTH_SHORT) == res, HAILO_INVALID_ARGUMENT, "Invalid device info");
+    }
+
+    return std::string(device_string);
+}
+
+// Ctor: hands the driver to the VdmaDevice base and queries the FW state.
+// Construction failures are reported through 'status'.
+PcieDevice::PcieDevice(HailoRTDriver &&driver, const hailo_pcie_device_info_t &device_info, hailo_status &status,
+    const std::string &device_id) :
+    VdmaDevice::VdmaDevice(std::move(driver), Device::Type::PCIE, device_id),
+    m_device_info(device_info)
+{
+    // 'driver' was moved into the base class above - query the base-class member instead
+    // of the moved-from parameter (fixes a use-after-move).
+    if (m_driver.is_fw_loaded()) {
+        status = update_fw_state();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("update_fw_state() failed with status {}", status);
+            return;
+        }
+    } else {
+        LOGGER__WARNING("FW is not loaded to the device. Please load FW before using the device.");
+        m_is_control_version_supported = false;
+    }
+
+    m_device_id = device_id;
+
+    status = HAILO_SUCCESS;
+}
+
+// Test hook (see header): overrides the control-version-supported flag.
+void PcieDevice::set_is_control_version_supported(bool value)
+{
+    m_is_control_version_supported = value;
+}
+
+// Returns the cached device architecture; fails when no FW is loaded (the architecture
+// is only known after FW state was read).
+Expected<hailo_device_architecture_t> PcieDevice::get_architecture() const
+{
+    if (!m_driver.is_fw_loaded()) {
+        LOGGER__WARNING("FW is not loaded to the device. Please load FW before using the device.");
+        return make_unexpected(HAILO_INVALID_OPERATION);
+    }
+
+    return Expected<hailo_device_architecture_t>(m_device_architecture);
+}
+
+// Raw device-memory write via the driver (no control protocol involved).
+hailo_status PcieDevice::direct_write_memory(uint32_t address, const void *buffer, uint32_t size)
+{
+    return m_driver.write_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
+}
+
+// Raw device-memory read via the driver.
+hailo_status PcieDevice::direct_read_memory(uint32_t address, void *buffer, uint32_t size)
+{
+    return m_driver.read_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
+}
+
+// Device id string ("[domain:]bus:device.func"); owned by this object.
+const char *PcieDevice::get_dev_id() const
+{
+    return m_device_id.c_str();
+}
+
+// Sends a reset control to the FW. Chip reset is rejected for PCIe; for soft resets no
+// response is expected (the FW goes down), so HAILO_FW_CONTROL_FAILURE is tolerated.
+hailo_status PcieDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    CONTROL_PROTOCOL__request_t request = {};
+    size_t request_size = 0;
+    uint8_t response_buffer[RESPONSE_MAX_BUFFER_SIZE] = {};
+    size_t response_size = RESPONSE_MAX_BUFFER_SIZE;
+    CONTROL_PROTOCOL__response_header_t *header = NULL;
+    CONTROL_PROTOCOL__payload_t *payload = NULL;
+    bool is_expecting_response = true;
+
+    CHECK(CONTROL_PROTOCOL__RESET_TYPE__CHIP != reset_type, HAILO_INVALID_OPERATION,
+        "Chip reset is not supported for PCIe device.");
+
+    if ((CONTROL_PROTOCOL__RESET_TYPE__FORCED_SOFT == reset_type) || (CONTROL_PROTOCOL__RESET_TYPE__SOFT == reset_type)) {
+        is_expecting_response = false; // TODO: Check boot source, set is_expecting_response = (boot_source != pcie)
+    }
+
+    common_status = CONTROL_PROTOCOL__pack_reset_request(&request, &request_size, m_control_sequence, reset_type);
+    status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
+    CHECK_SUCCESS(status);
+
+    LOGGER__DEBUG("Sending reset request");
+    status = this->fw_interact((uint8_t*)(&request), request_size, (uint8_t*)&response_buffer, &response_size);
+    // fw_interact should return failure if response is not expected
+    // TODO: fix logic with respect to is_expecting_response, implement wait_for_wakeup();
+    if (HAILO_SUCCESS == status) {
+        status = Control::parse_and_validate_response(response_buffer, (uint32_t)(response_size), &header,
+            &payload, &request);
+        CHECK_SUCCESS(status);
+        // Fixed wording of this error message ("Recived valid response ... who is not expecting one").
+        CHECK(is_expecting_response, HAILO_INTERNAL_FAILURE,
+            "Received a valid response from FW for a control that does not expect one.");
+    } else if ((HAILO_FW_CONTROL_FAILURE == status) && (!is_expecting_response)) {
+        // The FW went down before answering - expected for soft resets.
+        status = HAILO_SUCCESS;
+    } else {
+        return status;
+    }
+
+    LOGGER__DEBUG("Board has been reset successfully");
+    return HAILO_SUCCESS;
+}
+
+// Finds the driver scan result matching 'pcie_device_info' (bus/device/func must match;
+// domain is only compared when the caller specified one).
+Expected<HailoRTDriver::DeviceInfo> PcieDevice::find_device_info(const hailo_pcie_device_info_t &pcie_device_info)
+{
+    auto scan_results = HailoRTDriver::scan_devices();
+    CHECK_EXPECTED(scan_results);
+
+    // Find device index based on the information from "device_info"
+    for (const auto &scan_result : scan_results.value()) {
+        // Per-device parse failures are skipped silently (the constant was renamed from
+        // DONT_LOG_ON_FAILURE, whose name contradicted the value it passed); a summary
+        // error is logged below if nothing matched.
+        const bool LOG_ON_FAILURE = false;
+        auto scanned_info = parse_pcie_device_info(scan_result.device_id, LOG_ON_FAILURE);
+        if (!scanned_info) {
+            continue;
+        }
+
+        const bool match = (pcie_device_info.bus == scanned_info->bus) &&
+            (pcie_device_info.device == scanned_info->device) &&
+            (pcie_device_info.func == scanned_info->func) &&
+            ((HAILO_PCIE_ANY_DOMAIN == pcie_device_info.domain) || (pcie_device_info.domain == scanned_info->domain));
+        if (match) {
+            return HailoRTDriver::DeviceInfo(scan_result);
+        }
+    }
+
+    LOGGER__ERROR("Requested device not found");
+    return make_unexpected(HAILO_INVALID_ARGUMENT);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file pcie_device.hpp
+ * @brief Declaration of PcieDevice - a VdmaDevice connected over PCIe, with static helpers
+ *        for scanning devices and parsing/formatting lspci-style device ids.
+ **/
+
+#ifndef HAILO_PCIE_DEVICE_H_
+#define HAILO_PCIE_DEVICE_H_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "vdma/channel/boundary_channel.hpp"
+#include "vdma/vdma_device.hpp"
+
+
+namespace hailort
+{
+
+// A Hailo device connected over PCIe, implemented on top of VdmaDevice.
+class PcieDevice : public VdmaDevice {
+public:
+    // Scans the host for Hailo PCIe devices.
+    static Expected<std::vector<hailo_pcie_device_info_t>> scan();
+    // Creates the single present device; fails if zero or multiple devices are found.
+    static Expected<std::unique_ptr<PcieDevice>> create();
+    static Expected<std::unique_ptr<PcieDevice>> create(const hailo_pcie_device_info_t &device_info);
+    // Parses "[<domain>:]<bus>:<device>.<func>"; silent on failure when log_on_failure is false.
+    static Expected<hailo_pcie_device_info_t> parse_pcie_device_info(const std::string &device_info_str,
+        bool log_on_failure);
+    static Expected<std::string> pcie_device_info_to_string(const hailo_pcie_device_info_t &device_info);
+
+    virtual ~PcieDevice() = default;
+
+    virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
+    virtual hailo_status direct_write_memory(uint32_t address, const void *buffer, uint32_t size) override;
+    virtual hailo_status direct_read_memory(uint32_t address, void *buffer, uint32_t size) override;
+    virtual bool is_stream_interface_supported(const hailo_stream_interface_t& stream_interface) const override
+    {
+        switch (stream_interface) {
+        case HAILO_STREAM_INTERFACE_ETH:
+        case HAILO_STREAM_INTERFACE_INTEGRATED:
+            return false;
+        case HAILO_STREAM_INTERFACE_PCIE:
+        case HAILO_STREAM_INTERFACE_MIPI:
+            return true;
+        default:
+            LOGGER__ERROR("Invalid stream interface");
+            return false;
+        }
+    }
+
+    // TODO: used for tests
+    void set_is_control_version_supported(bool value);
+    virtual Expected<hailo_device_architecture_t> get_architecture() const override;
+
+    // NOTE(review): 'const' on a by-value return is redundant - consider dropping it
+    // (interface change; verify callers first).
+    const hailo_pcie_device_info_t get_device_info() const
+    {
+        return m_device_info;
+    }
+    virtual const char* get_dev_id() const override;
+
+private:
+    PcieDevice(HailoRTDriver &&driver, const hailo_pcie_device_info_t &device_info, hailo_status &status,
+        const std::string &device_id);
+
+    static Expected<HailoRTDriver::DeviceInfo> find_device_info(const hailo_pcie_device_info_t &pcie_device_info);
+
+    const hailo_pcie_device_info_t m_device_info;
+    std::string m_device_id;
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_PCIE_DEVICE_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_sg_buffer.cpp
- * @brief Scatter-gather vdma buffer.
- **/
-
-#include "sg_buffer.hpp"
-
-namespace hailort {
-namespace vdma {
-
-Expected<SgBuffer> SgBuffer::create(HailoRTDriver &driver, uint32_t desc_count, uint16_t desc_page_size,
- HailoRTDriver::DmaDirection data_direction, uint8_t channel_index)
-{
- auto desc_list = VdmaDescriptorList::create(desc_count, desc_page_size, driver);
- CHECK_EXPECTED(desc_list);
-
- assert((desc_count * desc_page_size) <= std::numeric_limits<uint32_t>::max());
- auto mapped_buffer = MappedBuffer::create(desc_count * desc_page_size, data_direction, driver);
- CHECK_EXPECTED(mapped_buffer);
-
- auto status = desc_list->configure_to_use_buffer(mapped_buffer.value(), channel_index);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return SgBuffer(desc_list.release(), mapped_buffer.release());
-}
-
-size_t SgBuffer::size() const
-{
- return m_mapped_buffer.size();
-}
-
-uint64_t SgBuffer::dma_address() const
-{
- return m_desc_list.dma_address();
-}
-
-uint16_t SgBuffer::desc_page_size() const
-{
- return m_desc_list.desc_page_size();
-}
-
-uint32_t SgBuffer::descs_count() const
-{
- return (uint32_t)m_desc_list.count();
-}
-
-uint8_t SgBuffer::depth() const
-{
- return m_desc_list.depth();
-}
-
-ExpectedRef<VdmaDescriptorList> SgBuffer::get_desc_list()
-{
- return std::ref(m_desc_list);
-}
-
-hailo_status SgBuffer::read(void *buf_dst, size_t count, size_t offset, bool should_sync)
-{
- return m_mapped_buffer.read(buf_dst, count, offset, should_sync);
-}
-
-hailo_status SgBuffer::write(const void *buf_src, size_t count, size_t offset)
-{
- return m_mapped_buffer.write(buf_src, count, offset);
-}
-
-hailo_status SgBuffer::read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync)
-{
- return m_mapped_buffer.read_cyclic(buf_dst, count, offset, should_sync);
-}
-
-hailo_status SgBuffer::write_cyclic(const void *buf_src, size_t count, size_t offset)
-{
- return m_mapped_buffer.write_cyclic(buf_src, count, offset);
-}
-
-// TODO: Remove after HRT-7838
-void* SgBuffer::get_user_address()
-{
- return m_mapped_buffer.user_address();
-}
-
-Expected<uint32_t> SgBuffer::program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular)
-{
- return m_desc_list.program_descriptors(transfer_size, first_desc_interrupts_domain, last_desc_interrupts_domain,
- desc_offset, is_circular);
-}
-
-hailo_status SgBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
- VdmaInterruptsDomain new_interrupts_domain)
-{
- const auto desc_per_transfer = m_desc_list.descriptors_in_buffer(transfer_size);
- const auto num_desc_in_batch = desc_per_transfer * batch_size;
- const auto last_desc_index_in_batch = num_desc_in_batch - 1;
- return m_desc_list.reprogram_descriptor_interrupts_domain(last_desc_index_in_batch, new_interrupts_domain);
-}
-
-hailo_status SgBuffer::reprogram_buffer_offset(size_t new_start_offset, uint8_t channel_index)
-{
- return m_desc_list.configure_to_use_buffer(m_mapped_buffer, channel_index, new_start_offset);
-}
-
-}
-}
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file sg_buffer.hpp
- * @brief Scatter-gather vdma buffer, from the user-mode point of view the buffer is continuous,
- * but not from the physical-memory point of view.
- * The sg buffer contains 2 parts:
- * - MappedBuffer - the actual buffer stores the data.
- * - Descriptors list - each descritpor points to a single "dma page" in the MappedBuffer.
- * The hw accept the descriptors list address and parses it to get the actual data.
- **/
-
-#ifndef _HAILO_VDMA_SG_BUFFER_HPP_
-#define _HAILO_VDMA_SG_BUFFER_HPP_
-
-#include "os/hailort_driver.hpp"
-#include "vdma/vdma_buffer.hpp"
-#include "vdma_descriptor_list.hpp"
-#include "vdma/mapped_buffer.hpp"
-
-namespace hailort {
-namespace vdma {
-
-class SgBuffer final : public VdmaBuffer {
-public:
- static Expected<SgBuffer> create(HailoRTDriver &driver, uint32_t desc_count, uint16_t desc_page_size,
- HailoRTDriver::DmaDirection data_direction, uint8_t channel_index = HailoRTDriver::INVALID_VDMA_CHANNEL_INDEX);
-
- virtual ~SgBuffer() = default;
-
- SgBuffer(const SgBuffer &) = delete;
- SgBuffer(SgBuffer &&) = default;
- SgBuffer& operator=(const SgBuffer &) = delete;
- SgBuffer& operator=(SgBuffer &&) = delete;
-
- virtual Type type() const override
- {
- return Type::SCATTER_GATHER;
- }
-
- virtual size_t size() const override;
- virtual uint64_t dma_address() const override;
- virtual uint16_t desc_page_size() const override;
- virtual uint32_t descs_count() const override;
- uint8_t depth() const;
-
- ExpectedRef<VdmaDescriptorList> get_desc_list();
- // TODO: Remove after HRT-7838
- void *get_user_address();
-
- virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync) override;
- virtual hailo_status write(const void *buf_src, size_t count, size_t offset) override;
-
- hailo_status read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync = true);
- hailo_status write_cyclic(const void *buf_src, size_t count, size_t offset);
-
- virtual Expected<uint32_t> program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular) override;
- virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
- VdmaInterruptsDomain new_interrupts_domain) override;
-
- // TODO: after HRT-8519 the VdmaDescriptorList will be owned by the vdma channel and this function can be removed
- // (VdmaChannel::reprogram_buffer_offset will call VdmaDescriptorList::configure_to_use_buffer directly)
- hailo_status reprogram_buffer_offset(size_t new_start_offset, uint8_t channel_index);
-
-private:
- SgBuffer(VdmaDescriptorList &&desc_list, MappedBuffer &&mapped_buffer) :
- m_desc_list(std::move(desc_list)),
- m_mapped_buffer(std::move(mapped_buffer))
- {}
-
- VdmaDescriptorList m_desc_list;
- MappedBuffer m_mapped_buffer;
-};
-
-} /* vdma */
-} /* hailort */
-
-#endif /* _HAILO_VDMA_SG_BUFFER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_async_stream.cpp
+ * @brief Async vdma stream implementation
+ **/
+
+#include "hailo/hailort_common.hpp"
+
+#include "vdma/vdma_async_stream.hpp"
+
+
+namespace hailort
+{
+
+VdmaAsyncInputStream::VdmaAsyncInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
+ const LayerInfo &edge_layer, EventPtr core_op_activated_event,
+ uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
+ hailo_stream_interface_t stream_interface, hailo_status &status) :
+ VdmaInputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size,
+ transfer_timeout, stream_interface, status)
+{
+ // Checking status for base class c'tor
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+
+ status = HAILO_SUCCESS;
+}
+
+Expected<size_t> VdmaAsyncInputStream::sync_write_raw_buffer(const MemoryView &)
+{
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+hailo_status VdmaAsyncInputStream::sync_write_all_raw_buffer_no_transform_impl(void *, size_t, size_t)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+hailo_status VdmaAsyncInputStream::wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+{
+ return m_channel->wait(transfer_size, timeout);
+}
+
+hailo_status VdmaAsyncInputStream::write_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque)
+{
+ return m_channel->transfer(buffer, user_callback, opaque);
+}
+
+/** Output stream **/
+
+VdmaAsyncOutputStream::VdmaAsyncOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+ EventPtr core_op_activated_event, uint16_t batch_size,
+ std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+ hailo_status &status) :
+ VdmaOutputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size,
+ transfer_timeout, interface, status)
+{
+ // Check status for base class c'tor
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+
+ status = HAILO_SUCCESS;
+}
+
+Expected<size_t> VdmaAsyncOutputStream::sync_read_raw_buffer(MemoryView &)
+{
+ return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
+hailo_status VdmaAsyncOutputStream::read_all(MemoryView &)
+{
+ return HAILO_NOT_IMPLEMENTED;
+}
+
+hailo_status VdmaAsyncOutputStream::wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+{
+ return m_channel->wait(transfer_size, timeout);
+}
+
+hailo_status VdmaAsyncOutputStream::read_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque)
+{
+ return m_channel->transfer(buffer, user_callback, opaque);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_async_stream.hpp
+ * @brief Async stream object over vDMA channel
+ **/
+
+#ifndef _HAILO_VDMA_ASYNC_STREAM_HPP_
+#define _HAILO_VDMA_ASYNC_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+#include "hailo/stream.hpp"
+
+#include "vdma/vdma_stream_base.hpp"
+#include "vdma/vdma_device.hpp"
+#include "vdma/channel/async_channel.hpp"
+
+
+namespace hailort
+{
+
+class VdmaAsyncInputStream : public VdmaInputStreamBase
+{
+public:
+ VdmaAsyncInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+ EventPtr core_op_activated_event, uint16_t batch_size,
+ std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
+ hailo_status &status);
+ virtual ~VdmaAsyncInputStream() = default;
+
+ virtual hailo_status wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
+ virtual hailo_status write_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque);
+
+private:
+ virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
+ virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+};
+
+class VdmaAsyncOutputStream : public VdmaOutputStreamBase
+{
+public:
+ VdmaAsyncOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+ EventPtr core_op_activated_event, uint16_t batch_size,
+ std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+ hailo_status &status);
+ virtual ~VdmaAsyncOutputStream() = default;
+
+ virtual hailo_status wait_for_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
+ virtual hailo_status read_async(std::shared_ptr<DmaMappedBuffer> buffer, const TransferDoneCallback &user_callback, void *opaque = nullptr) override;
+
+private:
+ virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer);
+ virtual hailo_status read_all(MemoryView &buffer) override;
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_ASYNC_STREAM_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_buffer.cpp
- * @brief vdma buffer.
- **/
-
-#include "vdma_buffer.hpp"
-#include "control_protocol.h"
-
-namespace hailort {
-namespace vdma {
-
-CONTROL_PROTOCOL__host_buffer_info_t VdmaBuffer::get_host_buffer_info(uint32_t transfer_size)
-{
- CONTROL_PROTOCOL__host_buffer_info_t buffer_info = {};
-
- buffer_info.buffer_type = static_cast<uint8_t>((type() == vdma::VdmaBuffer::Type::SCATTER_GATHER) ?
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_EXTERNAL_DESC :
- CONTROL_PROTOCOL__HOST_BUFFER_TYPE_CCB);
- buffer_info.dma_address = dma_address();
- buffer_info.desc_page_size = desc_page_size();
- buffer_info.total_desc_count = descs_count();
- buffer_info.bytes_in_pattern = transfer_size;
-
- return buffer_info;
-}
-
-}
-}
\ No newline at end of file
+++ /dev/null
-/**\r
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.\r
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
- **/\r
-/**\r
- * @file vdma_buffer.hpp\r
- * @brief Abstract layer representing a vdma buffer (buffer that can be read/written to the device over vdma.)\r
- * The buffer can be either non-continuous with attach descriptors list (SgBuffer) or continuous buffer.\r
- **/\r
-\r
-#ifndef _HAILO_VDMA_VDMA_BUFFER_HPP_\r
-#define _HAILO_VDMA_VDMA_BUFFER_HPP_\r
-\r
-#include "os/hailort_driver.hpp"\r
-#include "vdma_descriptor_list.hpp"\r
-#include "control_protocol.h"\r
-\r
-namespace hailort {\r
-namespace vdma {\r
-\r
-class VdmaBuffer {\r
-public:\r
-\r
- enum class Type {\r
- SCATTER_GATHER,\r
- CONTINUOUS\r
- };\r
-\r
- virtual ~VdmaBuffer() = default;\r
-\r
- VdmaBuffer() = default;\r
- VdmaBuffer(const VdmaBuffer &) = delete;\r
- VdmaBuffer(VdmaBuffer &&) = default;\r
- VdmaBuffer& operator=(const VdmaBuffer &) = delete;\r
- VdmaBuffer& operator=(VdmaBuffer &&) = delete;\r
-\r
- virtual Type type() const = 0;\r
- virtual size_t size() const = 0;\r
- virtual uint64_t dma_address() const = 0;\r
- virtual uint16_t desc_page_size() const = 0;\r
- virtual uint32_t descs_count() const = 0;\r
-\r
- uint32_t descriptors_in_buffer(size_t buffer_size) const\r
- {\r
- assert(buffer_size < std::numeric_limits<uint32_t>::max());\r
- const auto page_size = desc_page_size();\r
- return static_cast<uint32_t>(DESCRIPTORS_IN_BUFFER(buffer_size, page_size));\r
- }\r
-\r
- virtual hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync = true) = 0;\r
- virtual hailo_status write(const void *buf_src, size_t count, size_t offset) = 0;\r
-\r
- virtual Expected<uint32_t> program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,\r
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular) = 0;\r
- virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,\r
- VdmaInterruptsDomain new_interrupts_domain) = 0;\r
- \r
- CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info(uint32_t transfer_size);\r
-};\r
-\r
-} /* vdma */\r
-} /* hailort */\r
-\r
-#endif /* _HAILO_VDMA_VDMA_BUFFER_HPP_ */\r
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_config_activated_core_op.cpp
+ * @brief VdmaConfigActivatedCoreOp implementation
+ **/
+
+#include "vdma/vdma_config_activated_core_op.hpp"
+#include "device_common/control.hpp"
+
+#include <chrono>
+
+
+namespace hailort
+{
+
+Expected<VdmaConfigActivatedCoreOp> VdmaConfigActivatedCoreOp::create(
+ ActiveCoreOpHolder &active_core_op_holder,
+ const std::string &core_op_name,
+ std::shared_ptr<ResourcesManager> resources_manager,
+ // hailo_activate_network_group_params_t is currently an empty holder; if anything is added to it,
+ // it will require a check that these params are relevant for this one core-op only.
+ const hailo_activate_network_group_params_t &network_group_params,
+ uint16_t dynamic_batch_size,
+ std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+ std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+ EventPtr core_op_activated_event,
+ AccumulatorPtr deactivation_time_accumulator,
+ bool resume_pending_stream_transfers,
+ CoreOp &core_op)
+{
+ CHECK(!active_core_op_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
+ "core-op is currently active. You must deactivate before activating another core-op");
+
+ CHECK_ARG_NOT_NULL_AS_EXPECTED(deactivation_time_accumulator);
+
+ auto status = HAILO_UNINITIALIZED;
+ VdmaConfigActivatedCoreOp object(core_op_name, network_group_params, dynamic_batch_size, input_streams, output_streams,
+ std::move(resources_manager), active_core_op_holder, std::move(core_op_activated_event),
+ deactivation_time_accumulator, resume_pending_stream_transfers, core_op, status);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return object;
+}
+
+VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(
+ const std::string &core_op_name,
+ const hailo_activate_network_group_params_t &network_group_params,
+ uint16_t dynamic_batch_size,
+ std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+ std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+ std::shared_ptr<ResourcesManager> &&resources_manager,
+ ActiveCoreOpHolder &active_core_op_holder,
+ EventPtr &&core_op_activated_event,
+ AccumulatorPtr deactivation_time_accumulator,
+ bool resume_pending_stream_transfers,
+ CoreOp &core_op,
+ hailo_status &status) :
+ ActivatedCoreOp(network_group_params, input_streams, output_streams,
+ std::move(core_op_activated_event), status),
+ m_core_op_name(core_op_name),
+ m_should_reset_core_op(true),
+ m_active_core_op_holder(active_core_op_holder),
+ m_resources_manager(std::move(resources_manager)),
+ m_deactivation_time_accumulator(deactivation_time_accumulator),
+ m_keep_nn_config_during_reset(false)
+{
+ // Validate ActivatedCoreOp status
+ if (HAILO_SUCCESS != status) {
+ return;
+ }
+
+ // We know core_op is a VdmaConfigCoreOp
+ status = core_op.activate_impl(dynamic_batch_size, resume_pending_stream_transfers);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Error activating core-op");
+ return;
+ }
+}
+
+VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(VdmaConfigActivatedCoreOp &&other) noexcept :
+ ActivatedCoreOp(std::move(other)),
+ m_core_op_name(std::move(other.m_core_op_name)),
+ m_should_reset_core_op(std::exchange(other.m_should_reset_core_op, false)),
+ m_active_core_op_holder(other.m_active_core_op_holder),
+ m_resources_manager(std::move(other.m_resources_manager)),
+ m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator)),
+ m_keep_nn_config_during_reset(std::move(other.m_keep_nn_config_during_reset))
+{}
+
+VdmaConfigActivatedCoreOp::~VdmaConfigActivatedCoreOp()
+{
+ if (!m_should_reset_core_op) {
+ return;
+ }
+
+ auto status = HAILO_UNINITIALIZED;
+ const auto start_time = std::chrono::steady_clock::now();
+
+ auto core_op_ref = m_active_core_op_holder.get();
+ if (!core_op_ref.has_value()) {
+ LOGGER__ERROR("Error getting core-op (status {})", status);
+ return;
+ }
+
+ auto vdma_config_core_op = core_op_ref.value();
+
+ status = vdma_config_core_op.get().deactivate_impl(m_keep_nn_config_during_reset);
+ if (HAILO_SUCCESS != status) {
+ LOGGER__ERROR("Failed deactivating core-op (status {})", status);
+ }
+
+ const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+ std::chrono::steady_clock::now() - start_time).count();
+ LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
+ m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
+}
+
+// TODO: add get_core_op_name() for better code readability?
+const std::string &VdmaConfigActivatedCoreOp::get_network_group_name() const
+{
+ // network_group name is the same as core_op name in this case.
+ // VdmaConfigActivatedCoreOp should be used only for single core ops network groups.
+ return m_core_op_name;
+}
+
+Expected<Buffer> VdmaConfigActivatedCoreOp::get_intermediate_buffer(const IntermediateBufferKey &key)
+{
+ return m_resources_manager->read_intermediate_buffer(key);
+}
+
+hailo_status VdmaConfigActivatedCoreOp::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
+{
+ m_keep_nn_config_during_reset = keep_nn_config_during_reset;
+ return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_config_activated_core_op.hpp
+ * @brief Represent activated core-op from HEF
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_
+#define _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_
+
+#include "hailo/expected.hpp"
+
+#include "vdma/channel/boundary_channel.hpp"
+#include "core_op/active_core_op_holder.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+
+#include <vector>
+#include <map>
+#include <functional>
+
+
+namespace hailort
+{
+
+class VdmaConfigActivatedCoreOp : public ActivatedCoreOp
+{
+public:
+
+ static Expected<VdmaConfigActivatedCoreOp> create(
+ ActiveCoreOpHolder &active_core_op_holder,
+ const std::string &core_op_name,
+ std::shared_ptr<ResourcesManager> resources_manager,
+ const hailo_activate_network_group_params_t &network_group_params,
+ uint16_t dynamic_batch_size,
+ std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+ std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+ EventPtr core_op_activated_event,
+ AccumulatorPtr deactivation_time_accumulator,
+ bool resume_pending_stream_transfers,
+ CoreOp &core_op);
+
+ virtual ~VdmaConfigActivatedCoreOp();
+
+ VdmaConfigActivatedCoreOp(const VdmaConfigActivatedCoreOp &other) = delete;
+ VdmaConfigActivatedCoreOp &operator=(const VdmaConfigActivatedCoreOp &other) = delete;
+ VdmaConfigActivatedCoreOp &operator=(VdmaConfigActivatedCoreOp &&other) = delete;
+ VdmaConfigActivatedCoreOp(VdmaConfigActivatedCoreOp &&other) noexcept;
+
+ virtual const std::string &get_network_group_name() const override;
+ virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
+ virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
+
+private:
+ VdmaConfigActivatedCoreOp(
+ const std::string &core_op_name,
+ const hailo_activate_network_group_params_t &network_group_params,
+ uint16_t dynamic_batch_size,
+ std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
+ std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+ std::shared_ptr<ResourcesManager> &&resources_manager,
+ ActiveCoreOpHolder &active_core_op_holder,
+ EventPtr &&core_op_activated_event,
+ AccumulatorPtr deactivation_time_accumulator,
+ bool resume_pending_stream_transfers,
+ CoreOp &core_op,
+ hailo_status &status);
+
+ std::string m_core_op_name;
+ bool m_should_reset_core_op;
+ ActiveCoreOpHolder &m_active_core_op_holder;
+ std::shared_ptr<ResourcesManager> m_resources_manager;
+ AccumulatorPtr m_deactivation_time_accumulator;
+ bool m_keep_nn_config_during_reset;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_ */
--- /dev/null
+#include "utils/profiler/tracer_macros.hpp"
+#include "vdma/vdma_config_core_op.hpp"
+#include "network_group/network_group_internal.hpp"
+#include "net_flow/pipeline/vstream_internal.hpp"
+
+
+namespace hailort
+{
+
+Expected<VdmaConfigCoreOp> VdmaConfigCoreOp::create(ActiveCoreOpHolder &active_core_op_holder,
+ const ConfigureNetworkParams &config_params,
+ std::shared_ptr<ResourcesManager> resources_manager,
+ std::shared_ptr<CoreOpMetadata> metadata)
+{
+ auto status = HAILO_UNINITIALIZED;
+
+ VdmaConfigCoreOp object(active_core_op_holder, config_params,
+ std::move(resources_manager), metadata, status);
+ CHECK_SUCCESS_AS_EXPECTED(status);
+
+ return object;
+}
+
+VdmaConfigCoreOp::VdmaConfigCoreOp(ActiveCoreOpHolder &active_core_op_holder,
+ const ConfigureNetworkParams &config_params,
+ std::shared_ptr<ResourcesManager> &&resources_manager,
+ std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status) :
+ CoreOp(config_params, metadata, status),
+ m_active_core_op_holder(active_core_op_holder),
+ m_resources_manager(std::move(resources_manager))
+{}
+
+hailo_status VdmaConfigCoreOp::activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+ auto status = HAILO_UNINITIALIZED;
+
+ // Check that no network is currently activated
+ CHECK(!m_active_core_op_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
+ "Cant activate network because a network is already activated");
+
+ m_active_core_op_holder.set(*this);
+
+ status = m_resources_manager->set_inter_context_channels_dynamic_batch_size(dynamic_batch_size);
+ CHECK_SUCCESS(status, "Failed to set inter-context channels dynamic batch size.");
+
+ status = m_resources_manager->enable_state_machine(dynamic_batch_size);
+ CHECK_SUCCESS(status, "Failed to activate state-machine");
+
+ status = m_resources_manager->start_vdma_interrupts_dispatcher();
+ CHECK_SUCCESS(status, "Failed to start vdma interrupts");
+
+ // Low-level streams assume that the vdma channels are enabled (happens in `enable_state_machine`), and that
+ // the interrupt dispatcher is running (so they can wait for interrupts).
+ status = activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
+ CHECK_SUCCESS(status, "Failed to activate low level streams");
+
+ status = m_core_op_activated_event->signal();
+ CHECK_SUCCESS(status, "Failed to signal network activation event");
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status VdmaConfigCoreOp::deactivate_impl(bool keep_nn_config_during_reset)
+{
+ auto status = deactivate_host_resources();
+ CHECK_SUCCESS(status);
+
+ status = m_resources_manager->reset_state_machine(keep_nn_config_during_reset);
+ CHECK_SUCCESS(status, "Failed to reset context switch state machine");
+
+ // After the state machine has been reset the vdma channels are no longer active, so we
+ // can cancel pending async transfers, thus allowing vdma buffers linked to said transfers to be freed
+ status = m_resources_manager->cancel_pending_async_transfers();
+ CHECK_SUCCESS(status, "Failed to cancel pending async transfers");
+
+ return HAILO_SUCCESS;
+}
+
+hailo_status VdmaConfigCoreOp::deactivate_host_resources()
+{
+ // Check that network is currently activated
+ CHECK(m_active_core_op_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
+ "Cant Deactivate network because no network is already activated");
+
+ // Make sure the core op we are deactivating is this object
+ auto active_core_op_ref = m_active_core_op_holder.get().value();
+ CHECK(this == std::addressof(active_core_op_ref.get()), HAILO_INTERNAL_FAILURE,
+ "Trying to deactivate different network goup");
+
+ m_active_core_op_holder.clear();
+
+ m_core_op_activated_event->reset();
+
+ auto status = deactivate_low_level_streams();
+ CHECK_SUCCESS(status, "Failed to deactivate low level streams");
+
+ // After disabling the vdma interrupts, we may still get some interrupts. On HRT-9430 we need to clean them.
+ status = m_resources_manager->stop_vdma_interrupts_dispatcher();
+ CHECK_SUCCESS(status, "Failed to stop vdma interrupts");
+
+ return HAILO_SUCCESS;
+}
+
+Expected<std::unique_ptr<ActivatedNetworkGroup>> VdmaConfigCoreOp::create_activated_network_group(
+ const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
+ bool resume_pending_stream_transfers)
+{
+ auto start_time = std::chrono::steady_clock::now();
+ auto activated_net_group = VdmaConfigActivatedCoreOp::create(
+ m_active_core_op_holder, name(), m_resources_manager, network_group_params, dynamic_batch_size,
+ m_input_streams, m_output_streams, m_core_op_activated_event, m_deactivation_time_accumulator,
+ resume_pending_stream_transfers, *this);
+ const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+ std::chrono::steady_clock::now() - start_time).count();
+ CHECK_EXPECTED(activated_net_group);
+
+ LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and"
+ " thus the network is not fully activated yet.", name(), elapsed_time_ms);
+ m_activation_time_accumulator->add_data_point(elapsed_time_ms);
+
+ std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
+ make_unique_nothrow<VdmaConfigActivatedCoreOp>(activated_net_group.release());
+ CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+ return activated_net_group_ptr;
+}
+
+Expected<hailo_stream_interface_t> VdmaConfigCoreOp::get_default_streams_interface()
+{
+ return m_resources_manager->get_default_streams_interface();
+}
+
+bool VdmaConfigCoreOp::is_scheduled() const
+{
+ // Scheduler allowed only when working with VDevice and scheduler enabled.
+ return false;
+}
+
+hailo_status VdmaConfigCoreOp::set_scheduler_timeout(const std::chrono::milliseconds &/*timeout*/, const std::string &/*network_name*/)
+{
+ LOGGER__ERROR("Setting scheduler's timeout is only allowed when working with VDevice and scheduler enabled");
+ return HAILO_INVALID_OPERATION;
+}
+
+hailo_status VdmaConfigCoreOp::set_scheduler_threshold(uint32_t /*threshold*/, const std::string &/*network_name*/)
+{
+ LOGGER__ERROR("Setting scheduler's threshold is only allowed when working with VDevice and scheduler enabled");
+ return HAILO_INVALID_OPERATION;
+}
+
+hailo_status VdmaConfigCoreOp::set_scheduler_priority(uint8_t /*priority*/, const std::string &/*network_name*/)
+{
+ LOGGER__ERROR("Setting scheduler's priority is only allowed when working with VDevice and scheduler enabled");
+ return HAILO_INVALID_OPERATION;
+}
+
+Expected<std::shared_ptr<LatencyMetersMap>> VdmaConfigCoreOp::get_latency_meters()
+{
+ auto latency_meters = m_resources_manager->get_latency_meters();
+ return make_shared_nothrow<LatencyMetersMap>(latency_meters);
+}
+
+Expected<vdma::BoundaryChannelPtr> VdmaConfigCoreOp::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
+{
+ return m_resources_manager->get_boundary_vdma_channel_by_stream_name(stream_name);
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_config_core_op.hpp
+ * @brief Represent core-op from HEF file that can be activated
+ *
+ * This core-op can be used for both single or multi context core-ops but for PCIE only
+ **/
+
+#ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_CORE_OP_HPP_
+#define _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_CORE_OP_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/network_group.hpp"
+#include "hailo/hailort_defaults.hpp"
+
+#include "common/utils.hpp"
+
+#include "vdma/channel/boundary_channel.hpp"
+#include "core_op/resource_manager/resource_manager.hpp"
+#include "vdma/vdma_config_activated_core_op.hpp"
+#include "core_op/active_core_op_holder.hpp"
+
+#include "control_protocol.h"
+#include <cstdint>
+#include <assert.h>
+#include <map>
+#include <set>
+
+
+namespace hailort
+{
+
+
+class VdmaConfigCoreOp : public CoreOp
+{
+public:
+ static Expected<VdmaConfigCoreOp> create(ActiveCoreOpHolder &active_core_op_holder,
+ const ConfigureNetworkParams &config_params,
+ std::shared_ptr<ResourcesManager> resources_managers,
+ std::shared_ptr<CoreOpMetadata> metadata);
+
+ std::shared_ptr<ResourcesManager> &get_resources_manager()
+ {
+ return m_resources_manager;
+ }
+
+ // Functions to activate and deactivate core ops for the scheduler - don't create ActivatedNetworkGroup objects
+ // Note: Care should be taken when calling activate_impl with resume_pending_stream_transfers = true.
+ // If an output stream has outstanding transfers, and the NG is deactivated (via deactivate_impl) before they
+ // have been completed, then these pending transfers may be overwritten upon channel activation.
+ // Hence, when setting resume_pending_stream_transfers = true, the caller must validate that all pending
+ // reads have been received (i.e. an interrupt has been raised for this transfer)
+ virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+ // Will first deactivate host resources (via deactivate_host_resources) and then reset the core-op on the fw
+ virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset) override;
+ // Deactivate all resources related to the core-op on the host, but without resetting the core-op on the fw
+ hailo_status deactivate_host_resources();
+
+ virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
+ const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
+ bool resume_pending_stream_transfers) override;
+
+ virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
+
+ virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
+ virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(
+ const std::string &stream_name) override;
+
+ virtual bool is_scheduled() const override;
+ virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
+ virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
+ virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
+
+ virtual ~VdmaConfigCoreOp() = default;
+ VdmaConfigCoreOp(const VdmaConfigCoreOp &other) = delete;
+ VdmaConfigCoreOp &operator=(const VdmaConfigCoreOp &other) = delete;
+ VdmaConfigCoreOp &operator=(VdmaConfigCoreOp &&other) = delete;
+ VdmaConfigCoreOp(VdmaConfigCoreOp &&other) noexcept : CoreOp(std::move(other)),
+ m_active_core_op_holder(other.m_active_core_op_holder),
+ m_resources_manager(std::move(other.m_resources_manager))
+ {}
+
+private:
+ VdmaConfigCoreOp(ActiveCoreOpHolder &active_core_op_holder,
+ const ConfigureNetworkParams &config_params,
+ std::shared_ptr<ResourcesManager> &&resources_manager,
+ std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
+
+ ActiveCoreOpHolder &m_active_core_op_holder;
+ std::shared_ptr<ResourcesManager> m_resources_manager;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_CORE_OP_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file vdma_config_manager.cpp
+ * @brief Vdma config manager implementation
+ **/
+
+#include "vdma_config_manager.hpp"
+#include "hailo/hailort.h"
+
+namespace hailort
+{
+
+// Switches the device's active core-op. Three cases:
+//   - first activation  (current_active_core_op == nullptr): just activate next
+//   - final deactivation (next_core_op == nullptr): just deactivate current
+//   - full switch: deactivate host resources, (on integrated devices) reset the
+//     fw state machine, activate next, then cancel leftover async transfers.
+// At least one of the two core-ops must be non-null.
+hailo_status VdmaConfigManager::switch_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op,
+    std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, bool resume_pending_stream_transfers)
+{
+    // Keep the fw's NN config across deactivation - the next activation reuses it
+    static const auto RESET_NN_CONFIG = false;
+    CHECK((nullptr != current_active_core_op) || (nullptr != next_core_op), HAILO_INVALID_ARGUMENT);
+
+    if (nullptr == current_active_core_op) {
+        // Activate first core-op
+        return next_core_op->activate_impl(batch_size, resume_pending_stream_transfers);
+    } else if (nullptr == next_core_op) {
+        // Deactivate last core-op
+        return current_active_core_op->deactivate_impl(RESET_NN_CONFIG);
+    }
+
+    // We're switching from current_active_core_op to next_core_op.
+    // Deactivate the current core-op on the host, meaning the fw state machine won't be reset.
+    // This will be handled by activating the next core-op.
+    auto status = current_active_core_op->deactivate_host_resources();
+    CHECK_SUCCESS(status, "Failed deactivating current core-op");
+
+    // TODO: In mercury we need to reset after deactivate. This will be fixed in MSW-762 and the "if" will be removed
+    // when we make the nn_manager responsible to reset the nn-core.
+    if (Device::Type::INTEGRATED == current_active_core_op->get_resources_manager()->get_device().get_type()) {
+        status = current_active_core_op->get_resources_manager()->reset_state_machine(RESET_NN_CONFIG);
+        CHECK_SUCCESS(status, "Failed to reset state machine in switch core-op");
+    }
+
+    // Switch from the current core-op to the next core-op. I.e. current core-op will be deactivated and
+    // next core-op will be activated
+    status = next_core_op->activate_impl(batch_size, resume_pending_stream_transfers);
+    CHECK_SUCCESS(status, "Failed activating next core-op");
+
+    // Current core-op is now deactivated, so we can cancel pending async transfers
+    status = current_active_core_op->get_resources_manager()->cancel_pending_async_transfers();
+    CHECK_SUCCESS(status, "Failed canceling pending async transfers from previous core-op");
+
+    return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_config_manager.hpp
+ * @brief Manager of HEF parsing and vdma-core-op resources for Pcie devices (both single and multi context)
+ *
+ **/
+
+#ifndef HAILO_VDMA_CONFIG_MANAGER_HPP_
+#define HAILO_VDMA_CONFIG_MANAGER_HPP_
+
+#include "hailo/hailort.h"
+
+#include "common/utils.hpp"
+
+#include "vdma/vdma_config_core_op.hpp"
+
+
+namespace hailort
+{
+
+// Static-only utility (not instantiable) that performs the core-op switch sequence.
+class VdmaConfigManager final
+{
+public:
+    VdmaConfigManager() = delete;
+
+    // Switches activation from current_active_core_op to next_core_op.
+    // Either pointer may be null: null current means "first activation",
+    // null next means "deactivate last core-op" (both null is invalid).
+    static hailo_status switch_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op,
+        std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, bool resume_pending_stream_transfers);
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_VDMA_CONFIG_MANAGER_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_device.cpp
+ * @brief Implementation of VdmaDevice - common base for devices that communicate
+ *        with the Hailo device over vDMA via HailoRTDriver (PCIe and integrated).
+ **/
+
+#include "vdma/vdma_device.hpp"
+#include "vdma/memory/descriptor_list.hpp"
+#include "vdma/vdma_config_manager.hpp"
+#include "vdma/pcie/pcie_device.hpp"
+#include "vdma/integrated/integrated_device.hpp"
+#include "device_common/control.hpp"
+#include "core_op/resource_manager/resource_manager_builder.hpp"
+#include "core_op/core_op.hpp"
+
+#include <new>
+#include <algorithm>
+
+
+namespace hailort
+{
+
+#ifndef HAILO_EMULATOR
+static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(1000);
+#else /* ifndef HAILO_EMULATOR */
+static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(50000);
+#endif /* ifndef HAILO_EMULATOR */
+
+// Takes ownership of the driver (moved in) and starts notification handling
+// for this device id right away.
+VdmaDevice::VdmaDevice(HailoRTDriver &&driver, Device::Type type, const std::string &device_id) :
+    DeviceBase::DeviceBase(type),
+    m_driver(std::move(driver)), m_is_configured(false)
+{
+    activate_notifications(device_id);
+}
+
+// Factory for vDMA-backed devices: returns an IntegratedDevice when the id is
+// the integrated device id, otherwise tries to parse it as a PCIe device info.
+// Unknown ids fail with HAILO_INVALID_ARGUMENT.
+Expected<std::unique_ptr<VdmaDevice>> VdmaDevice::create(const std::string &device_id)
+{
+    // Parse failure is an expected flow here (the id may simply not be PCIe-shaped),
+    // so suppress logging inside the parser.
+    const bool DONT_LOG_ON_FAILURE = false;
+    if (IntegratedDevice::DEVICE_ID == device_id) {
+        auto device = IntegratedDevice::create();
+        CHECK_EXPECTED(device);
+        return std::unique_ptr<VdmaDevice>(device.release());
+    }
+    else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
+        auto device = PcieDevice::create(pcie_info.release());
+        CHECK_EXPECTED(device);
+        return std::unique_ptr<VdmaDevice>(device.release());
+    }
+    else {
+        LOGGER__ERROR("Invalid device id {}", device_id);
+        return make_unexpected(HAILO_INVALID_ARGUMENT);
+    }
+}
+
+// No wakeup handshake is needed for vDMA devices - trivially succeeds.
+hailo_status VdmaDevice::wait_for_wakeup()
+{
+    return HAILO_SUCCESS;
+}
+
+// Reads a single D2H notification from the driver and copies it into a typed
+// D2H_EVENT_MESSAGE_t. The raw buffer must not exceed the message struct size.
+Expected<D2H_EVENT_MESSAGE_t> VdmaDevice::read_notification()
+{
+    auto notification_buffer = m_driver.read_notification();
+    if (!notification_buffer.has_value()) {
+        return make_unexpected(notification_buffer.status());
+    }
+
+    D2H_EVENT_MESSAGE_t notification;
+    CHECK_AS_EXPECTED(sizeof(notification) >= notification_buffer->size(), HAILO_GET_D2H_EVENT_MESSAGE_FAIL,
+        "buffer len is not valid = {}", notification_buffer->size());
+    memcpy(&notification, notification_buffer->data(), notification_buffer->size());
+    return notification;
+}
+
+// Delegates to the driver; unblocks/stops the notification read path.
+hailo_status VdmaDevice::disable_notifications()
+{
+    return m_driver.disable_notifications();
+}
+
+// Sends a fw control request through the driver and validates response integrity:
+// the request's MD5 is handed to the driver alongside the payload, and the MD5
+// the driver reports for the response is compared against a locally computed
+// digest of the received bytes.
+hailo_status VdmaDevice::fw_interact_impl(uint8_t *request_buffer, size_t request_size,
+    uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id)
+{
+    uint8_t request_md5[PCIE_EXPECTED_MD5_LENGTH];
+    MD5_CTX ctx;
+
+    MD5_Init(&ctx);
+    MD5_Update(&ctx, request_buffer, request_size);
+    MD5_Final(request_md5, &ctx);
+
+    uint8_t response_md5[PCIE_EXPECTED_MD5_LENGTH];       // digest reported by the driver
+    uint8_t expected_response_md5[PCIE_EXPECTED_MD5_LENGTH]; // digest we compute over the received response
+
+    auto status = m_driver.fw_control(request_buffer, request_size, request_md5,
+        response_buffer, response_size, response_md5,
+        DEFAULT_TIMEOUT, cpu_id);
+    CHECK_SUCCESS(status, "Failed to send fw control");
+
+    // Re-hash the response we actually received and compare with the driver's digest
+    MD5_Init(&ctx);
+    MD5_Update(&ctx, response_buffer, (*response_size));
+    MD5_Final(expected_response_md5, &ctx);
+
+    auto memcmp_result = memcmp(expected_response_md5, response_md5, sizeof(response_md5));
+    CHECK(0 == memcmp_result, HAILO_INTERNAL_FAILURE, "MD5 validation of control response failed.");
+
+    return HAILO_SUCCESS;
+}
+
+// Configures network groups from a HEF onto this device. On the first call it
+// performs one-time device setup (fw state machine reset, optional nn-core reset
+// on integrated devices, clearing stale configured apps, and creating the vdma
+// interrupts dispatcher) before building the network groups.
+Expected<ConfiguredNetworkGroupVector> VdmaDevice::add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params)
+{
+    auto status = mark_as_used();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    if (!m_is_configured) {
+        // TODO: Do we need this control after fixing HRT-7519?
+        // Reset context_switch state machine - it may have been in an active state if a previous VdmaDevice
+        // wasn't dtor'd (due to SIGKILL for example)
+        static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
+        status = Control::reset_context_switch_state_machine(*this, REMOVE_NN_CONFIG_DURING_RESET);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        // In case of mercury need to reset nn core before activating network group to clear prior nn core state
+        if (Device::Type::INTEGRATED == get_type()) {
+            // On core device, the nn_manager is not responsible to reset the nn-core so
+            // we use the SCU control for that.
+            status = reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
+            CHECK_SUCCESS_AS_EXPECTED(status);
+        }
+
+        status = Control::clear_configured_apps(*this);
+        CHECK_SUCCESS_AS_EXPECTED(status, "Failed to clear configured network groups with status {}", status);
+
+        // The dispatcher is created exactly once per device configuration
+        assert(nullptr == m_vdma_interrupts_dispatcher);
+        auto interrupts_dispatcher = vdma::InterruptsDispatcher::create(std::ref(m_driver));
+        CHECK_EXPECTED(interrupts_dispatcher);
+        m_vdma_interrupts_dispatcher = interrupts_dispatcher.release();
+
+        m_is_configured = true;
+    }
+
+    auto added_network_groups = create_networks_group_vector(hef, configure_params);
+    CHECK_EXPECTED(added_network_groups);
+
+    return added_network_groups;
+}
+
+// TODO: HRT-9551 Create CoreOpMetadata and CoreOp in the same loop
+// Builds a single configured network group: allocates the core-op resources,
+// creates the VdmaConfigCoreOp and its streams, validates that all boundary
+// streams exist, and wraps everything in a ConfiguredNetworkGroup.
+Expected<std::shared_ptr<ConfiguredNetworkGroup>> VdmaDevice::create_configured_network_group(
+    std::vector<std::shared_ptr<CoreOpMetadata>> &core_ops_metadata,
+    Hef &hef, const ConfigureNetworkParams &config_params,
+    uint8_t current_core_op_index)
+{
+    std::vector<std::shared_ptr<CoreOp>> core_ops;
+    core_ops.reserve(core_ops_metadata.size());
+
+    // TODO: keep metadata per core_op (HRT-9551)
+    // TODO: HRT-8875 support multiple core ops
+    assert(core_ops_metadata.size() == 1);
+    auto core_op_metadata = core_ops_metadata[0];
+
+    /* build HEF supported features */
+    auto resource_manager = ResourcesManagerBuilder::build(current_core_op_index,
+        *this, get_driver(), config_params, core_op_metadata, hef.pimpl->get_device_arch());
+    CHECK_EXPECTED(resource_manager);
+
+    auto core_op = VdmaConfigCoreOp::create(m_active_core_op_holder, config_params,
+        resource_manager.release(), core_op_metadata);
+    // Bugfix: the Expected must be verified before release() - previously a failed
+    // create() would have been released unchecked.
+    CHECK_EXPECTED(core_op);
+
+    auto core_op_ptr = make_shared_nothrow<VdmaConfigCoreOp>(core_op.release());
+    CHECK_AS_EXPECTED(nullptr != core_op_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    // TODO: move this func into VdmaConfigCoreOp c'tor
+    auto status = core_op_ptr->create_streams_from_config_params(*this);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Check that all boundary streams were created
+    status = hef.pimpl->validate_boundary_streams_were_created(core_op_metadata->core_op_name(), core_op_ptr);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    core_ops.emplace_back(core_op_ptr);
+    m_core_ops.emplace_back(core_op_ptr);
+
+    // TODO: HRT-8875
+    auto net_flow_ops = hef.pimpl->post_process_ops(core_op_metadata->core_op_name());
+    auto network_group_expected = ConfiguredNetworkGroupBase::create(config_params, std::move(core_ops), std::move(net_flow_ops));
+    CHECK_EXPECTED(network_group_expected);
+    auto network_group_ptr = network_group_expected.release();
+
+    return Expected<std::shared_ptr<ConfiguredNetworkGroup>>(network_group_ptr);
+}
+
+// Reads the fw log of the given cpu into `buffer`; returns the number of bytes read.
+Expected<size_t> VdmaDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
+{
+    size_t read_bytes = 0;
+    hailo_status status = HAILO_UNINITIALIZED;
+    status = m_driver.read_log(buffer.data(), buffer.size(), &read_bytes, cpu_id);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return read_bytes;
+}
+
+// Intentionally does NOT increment - see comment below.
+void VdmaDevice::increment_control_sequence()
+{
+    // To support multiprocess the sequence must remain 0 which is a number the FW ignores.
+    // Otherwise the FW might get the same sequence number from several processes which
+    // cause the command to be discarded.
+    m_control_sequence = 0;
+}
+
+// vDMA devices default to a soft reset.
+hailo_reset_device_mode_t VdmaDevice::get_default_reset_mode()
+{
+    return HAILO_RESET_DEVICE_MODE_SOFT;
+}
+
+// Descriptor page size as adjusted by the driver for this platform.
+uint16_t VdmaDevice::get_default_desc_page_size() const
+{
+    return m_driver.calc_desc_page_size(vdma::DEFAULT_DESC_PAGE_SIZE);
+}
+
+// Marks the device as used in the driver (e.g. to guard exclusive access).
+hailo_status VdmaDevice::mark_as_used()
+{
+    return m_driver.mark_as_used();
+}
+
+// Returns the dispatcher created in add_hef(); fails if the device was never configured.
+ExpectedRef<vdma::InterruptsDispatcher> VdmaDevice::get_vdma_interrupts_dispatcher()
+{
+    CHECK_AS_EXPECTED(m_vdma_interrupts_dispatcher, HAILO_INTERNAL_FAILURE, "vDMA interrupt dispatcher wasn't created");
+    return std::ref(*m_vdma_interrupts_dispatcher);
+}
+
+// Best-effort teardown: stop the notification thread and, if the device was
+// configured, clear any configured core-ops from the fw. Failures are logged
+// but never propagate out of the destructor.
+VdmaDevice::~VdmaDevice()
+{
+    auto status = stop_notification_fetch_thread();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__WARNING("Stopping notification thread ungracefully");
+    }
+    if (m_is_configured) {
+        status = Control::clear_configured_apps(*this);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to clear configured core-ops with status {}", status);
+        }
+    }
+}
+
+// Builds a configured network group for every HEF network group selected by
+// `configure_params` (or all of them, with defaults, when the map is empty).
+// Fails if any configure-params key doesn't match a network group in the HEF.
+Expected<ConfiguredNetworkGroupVector> VdmaDevice::create_networks_group_vector(Hef &hef, const NetworkGroupsParamsMap &configure_params)
+{
+    auto partial_clusters_layout_bitmap_exp = Control::get_partial_clusters_layout_bitmap(*this);
+    CHECK_EXPECTED(partial_clusters_layout_bitmap_exp);
+    auto partial_clusters_layout_bitmap = partial_clusters_layout_bitmap_exp.release();
+
+    auto &hef_net_groups = hef.pimpl->network_groups();
+    // Copy is consumed below: matched entries are erased so leftovers can be reported
+    auto configure_params_copy = configure_params;
+    ConfiguredNetworkGroupVector added_network_groups;
+    // TODO: can be optimized (add another loop the allocate the network group we're adding)
+    added_network_groups.reserve(hef_net_groups.size());
+    for (const auto &hef_net_group : hef_net_groups) {
+        const std::string &network_group_name = HefUtils::get_network_group_name(*hef_net_group, SupportedFeatures());
+        const auto prev_core_op_count = m_core_ops.size();
+        auto current_core_op_index = static_cast<uint8_t>(prev_core_op_count);
+
+        /* If NG params are present, use them
+           If no configure params are given, use default*/
+        ConfigureNetworkParams config_params{};
+        if (contains(configure_params, network_group_name)) {
+            config_params = configure_params_copy.at(network_group_name);
+            configure_params_copy.erase(network_group_name);
+        } else if (configure_params.empty()) {
+            auto stream_interface = get_default_streams_interface();
+            CHECK_EXPECTED(stream_interface);
+            auto config_params_exp = hef.create_configure_params(stream_interface.value(), network_group_name);
+            CHECK_EXPECTED(config_params_exp);
+            config_params = config_params_exp.release();
+        } else {
+            // Params were given but not for this group - skip it
+            continue;
+        }
+
+        /* Validate batch size (network group batch size vs network batch size) */
+        auto status = Hef::Impl::update_network_batch_size(config_params);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        auto core_ops_metadata_ptrs = create_core_ops_metadata(hef, network_group_name, partial_clusters_layout_bitmap);
+        CHECK_EXPECTED(core_ops_metadata_ptrs);
+
+        auto network_group_expected = create_configured_network_group(core_ops_metadata_ptrs.value(),
+            hef, config_params, current_core_op_index);
+        CHECK_EXPECTED(network_group_expected);
+        auto network_group_ptr = network_group_expected.release();
+
+        added_network_groups.emplace_back(network_group_ptr);
+        m_network_groups.push_back(network_group_ptr);
+    }
+
+    // Anything left in the copy had no matching network group in the HEF
+    std::string unmatched_keys = "";
+    for (const auto &pair : configure_params_copy) {
+        unmatched_keys.append(" ");
+        unmatched_keys.append(pair.first);
+    }
+    CHECK_AS_EXPECTED(unmatched_keys.size() == 0, HAILO_INVALID_ARGUMENT,
+        "Some network group names in the configuration are not found in the hef file:{}", unmatched_keys);
+
+    return added_network_groups;
+}
+
+// Collects the CoreOpMetadata for a network group, selecting the partial core-op
+// variant that matches the device arch and partial-clusters layout, and enforcing
+// the fw limit on the total number of configured core-ops.
+Expected<std::vector<std::shared_ptr<CoreOpMetadata>>> VdmaDevice::create_core_ops_metadata(Hef &hef, const std::string &network_group_name, uint32_t partial_clusters_layout_bitmap)
+{
+    auto hef_core_ops = hef.pimpl->core_ops(network_group_name);
+    // Only a single core-op per network group is currently supported (HRT-8875)
+    assert(1 == hef_core_ops.size());
+
+    std::vector<std::shared_ptr<CoreOpMetadata>> core_ops_metadata_ptrs;
+    core_ops_metadata_ptrs.reserve(hef_core_ops.size());
+    const auto prev_core_ops_count = m_core_ops.size();
+    const auto total_core_ops_count = prev_core_ops_count + hef_core_ops.size();
+    CHECK_AS_EXPECTED(CONTROL_PROTOCOL__MAX_CONTEXT_SWITCH_APPLICATIONS >= total_core_ops_count,
+        HAILO_INVALID_OPERATION,
+        "Can't add {} core-ops from HEF. Currently {} core-ops are configured; maximum allowed core-ops: {}.",
+        hef_core_ops.size(), prev_core_ops_count, CONTROL_PROTOCOL__MAX_CONTEXT_SWITCH_APPLICATIONS);
+
+    auto hef_arch = hef.pimpl->get_device_arch();
+    auto device_arch = get_architecture();
+    CHECK_EXPECTED(device_arch);
+
+    for (const auto &hef_core_op : hef_core_ops) {
+        // Pick the core-op variant matching this device's arch and cluster layout
+        auto expected_partial_core_op = Hef::Impl::get_core_op_per_arch(hef_core_op, hef_arch, device_arch.value(),
+            partial_clusters_layout_bitmap);
+        CHECK_EXPECTED(expected_partial_core_op);
+        auto partial_core_op = expected_partial_core_op.release();
+        auto status = Hef::Impl::validate_core_op_unique_layer_names(*partial_core_op);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+
+        // TODO: keep metadata per core_op (HRT-9551)
+        // TODO: decide about core_op names - align with the Compiler
+        auto core_op_metadata = hef.pimpl->get_core_op_metadata(network_group_name, partial_clusters_layout_bitmap);
+        CHECK_EXPECTED(core_op_metadata);
+
+        auto core_op_metadata_ptr = make_shared_nothrow<CoreOpMetadata>(core_op_metadata.release());
+        CHECK_AS_EXPECTED(nullptr != core_op_metadata_ptr, HAILO_OUT_OF_HOST_MEMORY);
+        core_ops_metadata_ptrs.emplace_back(core_op_metadata_ptr);
+    }
+
+    return core_ops_metadata_ptrs;
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_device.hpp
+ * @brief Base class for devices that use vdma and communicate using HailoRTDriver
+ *
+ **/
+
+#ifndef HAILO_VDMA_DEVICE_H_
+#define HAILO_VDMA_DEVICE_H_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "device_common/device_internal.hpp"
+#include "network_group/network_group_internal.hpp"
+#include "os/hailort_driver.hpp"
+#include "vdma/channel/interrupts_dispatcher.hpp"
+
+
+namespace hailort
+{
+
+// Common base for devices operated through vDMA and HailoRTDriver
+// (PcieDevice and IntegratedDevice derive from it).
+class VdmaDevice : public DeviceBase {
+public:
+    // Factory: dispatches to IntegratedDevice or PcieDevice based on the id format.
+    static Expected<std::unique_ptr<VdmaDevice>> create(const std::string &device_id);
+
+    virtual ~VdmaDevice();
+
+    virtual hailo_status wait_for_wakeup() override;
+    virtual void increment_control_sequence() override;
+    virtual hailo_reset_device_mode_t get_default_reset_mode() override;
+    uint16_t get_default_desc_page_size() const;
+
+    hailo_status mark_as_used();
+    virtual Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id) override;
+
+    // Non-owning access to the underlying driver.
+    HailoRTDriver &get_driver() {
+        return std::ref(m_driver);
+    };
+
+    // Fails unless the device was configured (the dispatcher is created in add_hef).
+    ExpectedRef<vdma::InterruptsDispatcher> get_vdma_interrupts_dispatcher();
+
+protected:
+    VdmaDevice(HailoRTDriver &&driver, Type type, const std::string &device_id);
+
+    virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() override;
+    virtual hailo_status disable_notifications() override;
+    virtual hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size,
+        uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id) override;
+    virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) override;
+
+    HailoRTDriver m_driver;                                  // owned driver handle
+    std::vector<std::shared_ptr<CoreOp>> m_core_ops;         // all core-ops configured on this device
+    std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups; // TODO: HRT-9547 - Remove when ConfiguredNetworkGroup will be kept in global context
+
+    // The vdma interrupts dispatcher contains a callback with a reference to the current activated network group
+    // (reference to the ResourcesManager). Hence, it must be destructed before the networks groups are destructed.
+    std::unique_ptr<vdma::InterruptsDispatcher> m_vdma_interrupts_dispatcher;
+
+    ActiveCoreOpHolder m_active_core_op_holder;              // tracks which core-op is currently active
+    bool m_is_configured;                                    // one-time setup in add_hef already done
+
+private:
+    Expected<std::shared_ptr<ConfiguredNetworkGroup>> create_configured_network_group(
+        std::vector<std::shared_ptr<CoreOpMetadata>> &core_ops,
+        Hef &hef, const ConfigureNetworkParams &config_params,
+        uint8_t network_group_index);
+    Expected<ConfiguredNetworkGroupVector> create_networks_group_vector(Hef &hef, const NetworkGroupsParamsMap &configure_params);
+    Expected<std::vector<std::shared_ptr<CoreOpMetadata>>> create_core_ops_metadata(Hef &hef, const std::string &network_group_name,
+        uint32_t partial_clusters_layout_bitmap);
+};
+
+} /* namespace hailort */
+
+#endif /* HAILO_VDMA_DEVICE_H_ */
+++ /dev/null
-#include "vdma_mapped_buffer_impl.hpp"
-
-namespace hailort {
-namespace vdma {
-
-#if defined(__linux__) || defined(_MSC_VER)
-
-Expected<VdmaMappedBufferImpl> VdmaMappedBufferImpl::allocate_vdma_buffer(HailoRTDriver &driver, size_t required_size)
-{
- // Check if driver should be allocated from driver or from user
- if (driver.allocate_driver_buffer()) {
- auto driver_buffer_handle = driver.vdma_low_memory_buffer_alloc(required_size);
- CHECK_EXPECTED(driver_buffer_handle);
-
- uintptr_t driver_buff_handle = driver_buffer_handle.release();
-
- auto mapped_buffer = MmapBuffer<void>::create_file_map(required_size, driver.fd(), driver_buff_handle);
- CHECK_EXPECTED(mapped_buffer);
-
- return VdmaMappedBufferImpl(mapped_buffer.release(), driver_buff_handle, driver);
- }
- else {
- auto mapped_buffer = MmapBuffer<void>::create_shared_memory(required_size);
- CHECK_EXPECTED(mapped_buffer);
- return VdmaMappedBufferImpl(mapped_buffer.release(), HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE, driver);
- }
-}
-
-VdmaMappedBufferImpl::~VdmaMappedBufferImpl()
-{
- if (m_mapped_buffer) {
- if (HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE != m_driver_mapped_buffer_identifier) {
- m_driver.vdma_low_memory_buffer_free(m_driver_mapped_buffer_identifier);
- }
- }
-}
-
-#elif defined(__QNX__)
-
-#include <fcntl.h>
-
-const int VdmaMappedBufferImpl::INVALID_FD = -1;
-const shm_handle_t VdmaMappedBufferImpl::INVALID_HANDLE = (shm_handle_t)-1;
-const char* VdmaMappedBufferImpl::VDMA_BUFFER_TYPE_MEMORY_NAME = "/memory/below4G/ram/below1G";
-
-Expected<VdmaMappedBufferImpl> VdmaMappedBufferImpl::allocate_vdma_buffer(HailoRTDriver &driver, size_t required_size)
-{
- // Desctructor of type_mem_fd will close fd
- FileDescriptor type_mem_fd(posix_typed_mem_open(VDMA_BUFFER_TYPE_MEMORY_NAME, O_RDWR, POSIX_TYPED_MEM_ALLOCATE));
- if (INVALID_FD == type_mem_fd) {
- LOGGER__ERROR("Error getting fd from typed memory of type {}, errno {}\n", VDMA_BUFFER_TYPE_MEMORY_NAME,
- errno);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-
- vdma_mapped_buffer_driver_identifier driver_buff_handle;
- driver_buff_handle.shm_fd = shm_open(SHM_ANON, O_RDWR | O_CREAT, 0777);
- CHECK_AS_EXPECTED(INVALID_FD != driver_buff_handle.shm_fd, HAILO_INTERNAL_FAILURE,
- "Error creating shm object, errno is: {}", errno);
-
- // backs the shared memory object with physical memory
- int err = shm_ctl(driver_buff_handle.shm_fd, SHMCTL_ANON | SHMCTL_TYMEM, (uint64_t)type_mem_fd,
- required_size);
- if (-1 == err) {
- LOGGER__ERROR("Error backing shm object in physical memory, errno is: {}", errno);
- close(driver_buff_handle.shm_fd);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-
- // Create shared memory handle to send to driver
- err = shm_create_handle(driver_buff_handle.shm_fd, driver.resource_manager_pid(), O_RDWR,
- &driver_buff_handle.shm_handle, 0);
- if (0 != err) {
- LOGGER__ERROR("Error creating shm object handle, errno is: {}", errno);
- close(driver_buff_handle.shm_fd);
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-
- void *address = mmap(0, required_size, PROT_WRITE | PROT_READ | PROT_NOCACHE, MAP_SHARED, driver_buff_handle.shm_fd, 0);
- if (MAP_FAILED == address) {
- LOGGER__ERROR("Failed to mmap buffer with errno:{}", errno);
- shm_delete_handle(driver_buff_handle.shm_handle);
- close(driver_buff_handle.shm_fd);
- return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
- }
-
- return VdmaMappedBufferImpl(address, required_size, driver_buff_handle.shm_handle, driver_buff_handle.shm_fd, driver);
-}
-
-VdmaMappedBufferImpl::~VdmaMappedBufferImpl()
-{
- if (nullptr != m_address) {
- if (0 != munmap(m_address, m_length)) {
- LOGGER__ERROR("Error unmapping memory at address {}, Errno: {}", m_address, errno);
- }
-
- if (INVALID_FD != m_driver_mapped_buffer_identifier.shm_fd) {
- if (0 != close(m_driver_mapped_buffer_identifier.shm_fd)) {
- LOGGER__ERROR("Error closing shared memory fd, Errno: {}", errno);
- }
- }
- }
-}
-
-#else
-#error "unsupported platform!"
-#endif // defined(__linux__) || defined(_MSC_VER)
-
-} /* namespace vdma */
-} /* namespace hailort */
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-
-#ifndef _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_
-#define _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_
-
-#include "os/mmap_buffer.hpp"
-#include "os/hailort_driver.hpp"
-#include "hailo/expected.hpp"
-
-namespace hailort {
-namespace vdma {
-
-#if defined(__linux__) || defined(_MSC_VER)
-
-class VdmaMappedBufferImpl final {
-public:
- VdmaMappedBufferImpl(HailoRTDriver &driver) : m_mapped_buffer(),
- m_driver_mapped_buffer_identifier(HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE), m_driver(driver) {}
-
- ~VdmaMappedBufferImpl();
-
- VdmaMappedBufferImpl(VdmaMappedBufferImpl &&other) noexcept :
- m_mapped_buffer(std::move(other.m_mapped_buffer)),
- m_driver_mapped_buffer_identifier(std::exchange(other.m_driver_mapped_buffer_identifier, HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE)),
- m_driver(other.m_driver)
- {}
-
- VdmaMappedBufferImpl(const VdmaMappedBufferImpl &other) = delete;
- VdmaMappedBufferImpl &operator=(const VdmaMappedBufferImpl &other) = delete;
- VdmaMappedBufferImpl &operator=(VdmaMappedBufferImpl &&other) = delete;
-
- void* get() { return m_mapped_buffer.get(); }
-
- vdma_mapped_buffer_driver_identifier& get_mapped_buffer_identifier() { return m_driver_mapped_buffer_identifier; }
-
- explicit operator bool()
- {
- if (m_mapped_buffer)
- return true;
- return false;
- }
-
- static Expected<VdmaMappedBufferImpl> allocate_vdma_buffer(HailoRTDriver &driver, size_t required_size);
-
-private:
- VdmaMappedBufferImpl(MmapBuffer<void>&& mapped_buffer, vdma_mapped_buffer_driver_identifier driver_handle, HailoRTDriver &driver) :
- m_mapped_buffer(std::move(mapped_buffer)), m_driver_mapped_buffer_identifier(driver_handle), m_driver(driver) {}
-
- MmapBuffer<void> m_mapped_buffer;
- vdma_mapped_buffer_driver_identifier m_driver_mapped_buffer_identifier;
- HailoRTDriver &m_driver;
-};
-
-#elif defined(__QNX__)
-
-class VdmaMappedBufferImpl final {
-public:
- VdmaMappedBufferImpl(HailoRTDriver &driver): m_address(nullptr), m_length(0), m_driver(driver) {
- m_driver_mapped_buffer_identifier.shm_handle = INVALID_HANDLE;
- m_driver_mapped_buffer_identifier.shm_fd = INVALID_FD;
- }
-
- ~VdmaMappedBufferImpl();
-
- VdmaMappedBufferImpl(VdmaMappedBufferImpl &&other) noexcept : m_address(std::exchange(other.m_address, nullptr)),
- m_length(std::exchange(other.m_length, 0)), m_driver(other.m_driver)
- {
- m_driver_mapped_buffer_identifier.shm_handle = std::exchange(other.m_driver_mapped_buffer_identifier.shm_handle, INVALID_HANDLE);
- m_driver_mapped_buffer_identifier.shm_fd = std::exchange(other.m_driver_mapped_buffer_identifier.shm_fd, INVALID_FD);
-
- }
-
- VdmaMappedBufferImpl(const VdmaMappedBufferImpl &other) = delete;
- VdmaMappedBufferImpl &operator=(const VdmaMappedBufferImpl &other) = delete;
- VdmaMappedBufferImpl &operator=(VdmaMappedBufferImpl &&other) = delete;
-
- void* get() { return m_address; }
-
- vdma_mapped_buffer_driver_identifier& get_mapped_buffer_identifier() { return m_driver_mapped_buffer_identifier; }
-
- explicit operator bool()
- {
- return (nullptr != m_address);
- }
-
- static Expected<VdmaMappedBufferImpl> allocate_vdma_buffer(HailoRTDriver &driver, size_t required_size);
-
-private:
- VdmaMappedBufferImpl(void *addr, size_t length, shm_handle_t shm_handle, int shm_fd, HailoRTDriver &driver) :
- m_address(addr), m_length(length), m_driver(driver)
- {
- m_driver_mapped_buffer_identifier.shm_handle = shm_handle;
- m_driver_mapped_buffer_identifier.shm_fd = shm_fd;
- }
-
- static const int INVALID_FD;
- static const shm_handle_t INVALID_HANDLE;
- static const char* VDMA_BUFFER_TYPE_MEMORY_NAME;
-
- void *m_address;
- size_t m_length;
- vdma_mapped_buffer_driver_identifier m_driver_mapped_buffer_identifier;
- HailoRTDriver &m_driver;
-};
-
-#else
-#error "unsupported platform!"
-#endif // defined(__linux__) || defined(_MSC_VER)
-
-} /* namespace vdma */
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_MAPPED_BUFFER_IMPL_HPP_ */
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_stream.cpp
+ **/
+
+#include "hailo/hailort_common.hpp"
+
+#include "vdma/vdma_stream.hpp"
+
+
+namespace hailort
+{
+
+// Construction errors are reported through the out-param `status`
+// (set by the VdmaInputStreamBase c'tor).
+VdmaInputStream::VdmaInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
+                                 const LayerInfo &edge_layer, EventPtr core_op_activated_event,
+                                 uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
+                                 hailo_stream_interface_t stream_interface, hailo_status &status) :
+    VdmaInputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size, transfer_timeout, stream_interface, status),
+    m_write_only_mutex(),
+    m_send_pending_mutex()
+{
+    // Checking status for base class c'tor
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+// Blocking H2D write: waits for channel space, then transfers the buffer.
+// HAILO_STREAM_ABORTED_BY_USER / HAILO_STREAM_NOT_ACTIVATED propagate without
+// logging (expected flows); a timeout is reported with the configured timeout value.
+Expected<size_t> VdmaInputStream::sync_write_raw_buffer(const MemoryView &buffer)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    status = m_channel->wait(buffer.size(), m_channel_timeout);
+    if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
+        return make_unexpected(status);
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
+        "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = m_channel->transfer((void*)buffer.data(), buffer.size());
+    if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
+        return make_unexpected(status);
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
+        "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return buffer.size();
+}
+
+// Queues the buffer on the channel without triggering the transfer
+// (pairs with send_pending_buffer). Serialized by m_write_only_mutex.
+hailo_status VdmaInputStream::write_buffer_only(const MemoryView &buffer,
+    const std::function<bool()> &should_cancel)
+{
+    std::unique_lock<std::mutex> lock(m_write_only_mutex);
+    return m_channel->write_buffer(buffer, m_channel_timeout, should_cancel);
+}
+
+// Triggers the transfer of a buffer previously queued by write_buffer_only().
+// Only device_index 0 is supported on a single-device stream.
+hailo_status VdmaInputStream::send_pending_buffer(size_t device_index)
+{
+    std::unique_lock<std::mutex> lock(m_send_pending_mutex);
+    CHECK(0 == device_index, HAILO_INVALID_OPERATION);
+    hailo_status status = m_channel->wait(get_frame_size(), m_channel_timeout);
+    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+        // Expected flows - propagate silently
+        return status;
+    }
+    CHECK(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
+        "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
+    CHECK_SUCCESS(status);
+
+    return m_channel->send_pending_buffer();
+}
+
+// Thin adapter: forwards (buffer + offset, size) to sync_write_raw_buffer.
+hailo_status VdmaInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
+{
+    ASSERT(NULL != buffer);
+
+    return sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, size)).status();
+}
+
+/** Output stream **/
+
+// Construction errors are reported through the out-param `status`
+// (set by the VdmaOutputStreamBase c'tor).
+VdmaOutputStream::VdmaOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                                   EventPtr core_op_activated_event, uint16_t batch_size,
+                                   std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                                   hailo_status &status) :
+    VdmaOutputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size, transfer_timeout, interface, status),
+    m_read_mutex()
+{
+    // Check status for base class c'tor
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+// Blocking D2H read: waits for data availability, then transfers into `buffer`.
+// HAILO_STREAM_ABORTED_BY_USER / HAILO_STREAM_NOT_ACTIVATED propagate without
+// logging (expected flows); a timeout is reported with the configured timeout value.
+Expected<size_t> VdmaOutputStream::sync_read_raw_buffer(MemoryView &buffer)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+
+    status = m_channel->wait(buffer.size(), m_transfer_timeout);
+    if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
+        return make_unexpected(status);
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
+        "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_transfer_timeout.count());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = m_channel->transfer(buffer.data(), buffer.size());
+    if ((status == HAILO_STREAM_NOT_ACTIVATED) || (status == HAILO_STREAM_ABORTED_BY_USER)) {
+        return make_unexpected(status);
+    }
+    CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
+        "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_transfer_timeout.count());
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return buffer.size();
+}
+
+// Serialized read of a full buffer; the size must be HW-data aligned.
+hailo_status VdmaOutputStream::read_all(MemoryView &buffer)
+{
+    std::unique_lock<std::mutex> lock(m_read_mutex);
+    CHECK((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+        "Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
+
+    return sync_read_raw_buffer(buffer).status();
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_stream.hpp
+ * @brief Stream object over vDMA channel
+ **/
+
+#ifndef _HAILO_VDMA_STREAM_HPP_
+#define _HAILO_VDMA_STREAM_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdma/vdma_stream_base.hpp"
+#include "vdma/vdma_device.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+
+
+namespace hailort
+{
+
+// Synchronous (buffered) input stream over a vDMA boundary channel.
+// Created by VdmaInputStreamBase::create() when the stream is NOT configured with
+// HAILO_STREAM_FLAGS_ASYNC (requires a BUFFERED boundary channel).
+class VdmaInputStream : public VdmaInputStreamBase
+{
+public:
+    VdmaInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                    EventPtr core_op_activated_event, uint16_t batch_size,
+                    std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
+                    hailo_status &status);
+    virtual ~VdmaInputStream() = default;
+
+    // Writes `buffer` into the channel without sending it; `should_cancel` lets the caller
+    // bail out of a blocking wait for room. Exact semantics live in the .cpp implementation.
+    hailo_status write_buffer_only(const MemoryView &buffer, const std::function<bool()> &should_cancel = []() { return false; });
+    // Sends a buffer previously queued with write_buffer_only() (see .cpp for details).
+    hailo_status send_pending_buffer(size_t device_index = 0);
+
+    // Wakes any thread blocked on the underlying boundary channel.
+    void notify_all()
+    {
+        return m_channel->notify_all();
+    }
+
+private:
+    virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
+    virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
+
+    // NOTE(review): presumably these guard write_buffer_only() and send_pending_buffer()
+    // respectively - the implementations are not visible here; confirm against the .cpp.
+    std::mutex m_write_only_mutex;
+    std::mutex m_send_pending_mutex;
+
+    friend class InputVDeviceBaseStream;
+    friend class InputVDeviceNativeStream;
+};
+
+// Synchronous (buffered) output stream over a vDMA boundary channel.
+// Created by VdmaOutputStreamBase::create() when the stream is NOT configured with
+// HAILO_STREAM_FLAGS_ASYNC (requires a BUFFERED boundary channel).
+class VdmaOutputStream : public VdmaOutputStreamBase
+{
+public:
+    VdmaOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                     EventPtr core_op_activated_event, uint16_t batch_size,
+                     std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                     hailo_status &status);
+    virtual ~VdmaOutputStream() = default;
+
+private:
+    // New virtual introduced at this level (no matching declaration in VdmaOutputStreamBase).
+    virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer);
+    virtual hailo_status read_all(MemoryView &buffer) override;
+
+    // NOTE(review): this shadows VdmaOutputStreamBase::m_read_mutex (declared in
+    // vdma_stream_base.hpp) - two distinct mutexes with the same name exist in the
+    // hierarchy; confirm which one each read path locks and whether one can be removed.
+    std::mutex m_read_mutex;
+
+    friend class OutputVDeviceBaseStream;
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_STREAM_HPP_ */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_stream_base.cpp
+ **/
+
+#include "hailo/hailort_common.hpp"
+
+#include "vdma/vdma_stream_base.hpp"
+#include "vdma/vdma_stream.hpp"
+#include "vdma/vdma_async_stream.hpp"
+
+
+namespace hailort
+{
+
+// Returns true iff `interface` matches the physical device `type`:
+// PCIE devices must use HAILO_STREAM_INTERFACE_PCIE, integrated devices
+// HAILO_STREAM_INTERFACE_INTEGRATED. Any mismatch or unknown device type
+// is logged and reported as false.
+static bool validate_device_interface_compatibility(hailo_stream_interface_t interface, Device::Type type)
+{
+    bool interface_valid = false;
+    switch (type)
+    {
+    case Device::Type::PCIE:
+        interface_valid = (HAILO_STREAM_INTERFACE_PCIE == interface);
+        break;
+
+    case Device::Type::INTEGRATED:
+        interface_valid = (HAILO_STREAM_INTERFACE_INTEGRATED == interface);
+        break;
+
+    default:
+        LOGGER__ERROR("Invalid device type {}", type);
+        return false;
+    }
+
+    if (interface_valid) {
+        return true;
+    }
+
+    LOGGER__ERROR("Invalid interface {} for device of type {}", interface, type);
+    return false;
+}
+
+// Factory for input streams. Selects the concrete implementation from the stream flags:
+// HAILO_STREAM_FLAGS_ASYNC requires an ASYNC boundary channel and yields VdmaAsyncInputStream;
+// otherwise a BUFFERED channel is required and a synchronous VdmaInputStream is created.
+// The channel type / flags pairing is validated up front, as is the interface vs. device type.
+Expected<std::shared_ptr<VdmaInputStreamBase>> VdmaInputStreamBase::create(hailo_stream_interface_t interface,
+    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+    const hailo_stream_parameters_t &stream_params, uint16_t batch_size, EventPtr core_op_activated_event)
+{
+    CHECK_AS_EXPECTED(validate_device_interface_compatibility(interface, device.get_type()), HAILO_INTERNAL_FAILURE);
+
+    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
+        CHECK_AS_EXPECTED(channel->type() == vdma::BoundaryChannel::Type::ASYNC, HAILO_INVALID_ARGUMENT,
+            "Can't create a async vdma stream with a non async channel. Received channel type {}", channel->type());
+
+        // Status is filled by the c'tor (nothrow construction pattern).
+        hailo_status status = HAILO_UNINITIALIZED;
+        auto result = make_shared_nothrow<VdmaAsyncInputStream>(device, channel, edge_layer, core_op_activated_event,
+            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+        CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+
+        return std::static_pointer_cast<VdmaInputStreamBase>(result);
+    } else {
+        CHECK_AS_EXPECTED(channel->type() == vdma::BoundaryChannel::Type::BUFFERED, HAILO_INVALID_ARGUMENT,
+            "Can't create a vdma stream with a non buffered channel. Received channel type {}", channel->type());
+
+        hailo_status status = HAILO_UNINITIALIZED;
+        auto result = make_shared_nothrow<VdmaInputStream>(device, channel, edge_layer, core_op_activated_event,
+            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+        CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+
+        return std::static_pointer_cast<VdmaInputStreamBase>(result);
+    }
+}
+
+// C'tor - stores the device/channel and batch/timeout configuration. `batch_size` seeds both
+// the immutable max batch size and the (mutable) dynamic batch size; the dynamic value can
+// later shrink via set_dynamic_batch_size(). Failures are reported through the `status`
+// out-parameter rather than exceptions.
+VdmaInputStreamBase::VdmaInputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
+                                         const LayerInfo &edge_layer, EventPtr core_op_activated_event,
+                                         uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
+                                         hailo_stream_interface_t stream_interface, hailo_status &status) :
+    InputStreamBase(edge_layer, stream_interface, std::move(core_op_activated_event), status),
+    m_device(&device),
+    m_channel(std::move(channel)),
+    m_interface(stream_interface),
+    is_stream_activated(false),
+    m_channel_timeout(transfer_timeout),
+    m_max_batch_size(batch_size),
+    m_dynamic_batch_size(batch_size)
+{
+    // Checking status for base class c'tor
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+// D'tor - deactivates the stream (best-effort flush + channel stop) if it is still active;
+// deactivation failures are only logged, never thrown.
+VdmaInputStreamBase::~VdmaInputStreamBase()
+{
+    // We want to stop the vdma channel before closing the stream in the firmware
+    // because sending data to a closed stream may terminate the dma engine
+    if (this->is_stream_activated) {
+        // Qualified call: virtual dispatch is unsafe in a destructor (derived part is gone).
+        const auto status = VdmaInputStreamBase::deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
+        }
+    }
+}
+
+// Returns the stream interface this stream was created with (PCIE/INTEGRATED).
+hailo_stream_interface_t VdmaInputStreamBase::get_interface() const
+{
+    return m_interface;
+}
+
+std::chrono::milliseconds VdmaInputStreamBase::get_timeout() const
+{
+    return this->m_channel_timeout;
+}
+
+// Sets the per-transfer timeout; always succeeds.
+hailo_status VdmaInputStreamBase::set_timeout(std::chrono::milliseconds timeout)
+{
+    this->m_channel_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+// Aborts any blocking operation on the underlying channel.
+hailo_status VdmaInputStreamBase::abort()
+{
+    return m_channel->abort();
+}
+
+hailo_status VdmaInputStreamBase::clear_abort()
+{
+    return m_channel->clear_abort();
+}
+
+// Flushes pending H2D transfers. The overall timeout is scaled by the dynamic batch size,
+// with the IGNORE sentinel treated as a batch of 1.
+hailo_status VdmaInputStreamBase::flush()
+{
+    const auto dynamic_batch_size = (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_dynamic_batch_size) ?
+        1 : m_dynamic_batch_size;
+    return m_channel->flush(m_channel_timeout * dynamic_batch_size);
+}
+
+// Applies the requested dynamic batch size and activates the boundary channel.
+// The first activate() argument is a transfer size of 0 - unlike the D2H counterpart,
+// which passes m_transfer_size; presumably H2D channels don't pre-program descriptors
+// by transfer size (confirm against BoundaryChannel::activate()).
+hailo_status VdmaInputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    auto status = set_dynamic_batch_size(dynamic_batch_size);
+    CHECK_SUCCESS(status);
+
+    status = m_channel->activate(0, resume_pending_stream_transfers);
+    CHECK_SUCCESS(status);
+
+    this->is_stream_activated = true;
+
+    return HAILO_SUCCESS;
+}
+
+// Deactivates the stream: best-effort flush of in-flight transfers, then channel deactivate.
+// A flush aborted by the user is expected and downgraded to success; other flush failures are
+// only logged. The returned status is the channel deactivate() result.
+hailo_status VdmaInputStreamBase::deactivate_stream()
+{
+    // Idempotent - a second call is a no-op.
+    if (!is_stream_activated) {
+        return HAILO_SUCCESS;
+    }
+
+    // Flush is best effort
+    auto status = m_channel->flush(VDMA_FLUSH_TIMEOUT);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Flush input_channel is not needed because channel was aborted. (channel {})", m_channel->get_channel_id());
+        status = HAILO_SUCCESS;
+    } else if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to flush input_channel. (status {} channel {})", status, m_channel->get_channel_id());
+    }
+
+    status = m_channel->deactivate();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to stop channel with status {}", status);
+    }
+
+    this->is_stream_activated = false;
+    return status;
+}
+
+// Returns the dynamic batch size clamped to at least 1, so callers can safely use it as
+// a multiplier (presumably the IGNORE sentinel is 0 - confirm against control_protocol).
+uint16_t VdmaInputStreamBase::get_dynamic_batch_size() const
+{
+    return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
+}
+
+// Returns the identifier of the device owning this stream's channel.
+const char* VdmaInputStreamBase::get_dev_id() const
+{
+    return m_device->get_dev_id();
+}
+
+Expected<vdma::BoundaryChannel::BufferState> VdmaInputStreamBase::get_buffer_state()
+{
+    return m_channel->get_buffer_state();
+}
+
+// Number of hw frames that fit in the channel's buffer.
+Expected<size_t> VdmaInputStreamBase::get_buffer_frames_size() const
+{
+    return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
+}
+
+// Number of H2D frames queued but not yet consumed by the device.
+Expected<size_t> VdmaInputStreamBase::get_pending_frames_count() const
+{
+    return m_channel->get_h2d_pending_frames_count();
+}
+
+// Registers a callback invoked by the channel on transfer-complete interrupts.
+hailo_status VdmaInputStreamBase::register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback)
+{
+    return m_channel->register_interrupt_callback(callback);
+}
+
+// Updates the dynamic batch size, bounded by the configured max batch size.
+// The IGNORE sentinel keeps the previously set value; otherwise the new value is also
+// pushed to the channel as its transfers-per-AXI-interrupt count.
+hailo_status VdmaInputStreamBase::set_dynamic_batch_size(uint16_t dynamic_batch_size)
+{
+    // TODO: use std::max in the configure stage
+    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
+        // NOTE(review): the message says "value of dynamic_batch_size" but logs the member
+        // m_dynamic_batch_size, not the argument - presumably the argument was intended; confirm.
+        LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
+            "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
+        return HAILO_SUCCESS;
+    }
+
+    CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
+        "Dynamic batch size ({}) must be <= than the configured batch size ({})",
+        dynamic_batch_size, m_max_batch_size);
+
+    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
+        LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
+            "Leaving previously set value of {}", m_dynamic_batch_size);
+    } else {
+        LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
+        m_dynamic_batch_size = dynamic_batch_size;
+
+        const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+/** Output stream **/
+// Factory for output streams - mirrors VdmaInputStreamBase::create():
+// HAILO_STREAM_FLAGS_ASYNC requires an ASYNC boundary channel and yields VdmaAsyncOutputStream;
+// otherwise a BUFFERED channel is required and a synchronous VdmaOutputStream is created.
+Expected<std::shared_ptr<VdmaOutputStreamBase>> VdmaOutputStreamBase::create(hailo_stream_interface_t interface,
+    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer, uint16_t batch_size,
+    const hailo_stream_parameters_t &stream_params, EventPtr core_op_activated_event)
+{
+    CHECK_AS_EXPECTED(validate_device_interface_compatibility(interface, device.get_type()), HAILO_INTERNAL_FAILURE);
+
+    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
+        CHECK_AS_EXPECTED(channel->type() == vdma::BoundaryChannel::Type::ASYNC, HAILO_INVALID_ARGUMENT,
+            "Can't create a async vdma stream with a non async channel. Received channel type {}", channel->type());
+
+        // Status is filled by the c'tor (nothrow construction pattern).
+        hailo_status status = HAILO_UNINITIALIZED;
+        auto result = make_shared_nothrow<VdmaAsyncOutputStream>(device, channel, edge_layer, core_op_activated_event,
+            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+        CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+
+        return std::static_pointer_cast<VdmaOutputStreamBase>(result);
+    } else {
+        CHECK_AS_EXPECTED(channel->type() == vdma::BoundaryChannel::Type::BUFFERED, HAILO_INVALID_ARGUMENT,
+            "Can't create a vdma stream with a non buffered channel. Received channel type {}", channel->type());
+
+        hailo_status status = HAILO_UNINITIALIZED;
+        auto result = make_shared_nothrow<VdmaOutputStream>(device, channel, edge_layer, core_op_activated_event,
+            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+        CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+
+        return std::static_pointer_cast<VdmaOutputStreamBase>(result);
+    }
+}
+
+// C'tor - like the input-stream base, but additionally caches m_transfer_size, which is
+// derived from the stream info (bbox size for NMS order, full hw frame size otherwise)
+// and later passed to the channel on activation.
+VdmaOutputStreamBase::VdmaOutputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                                           EventPtr core_op_activated_event, uint16_t batch_size,
+                                           std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                                           hailo_status &status) :
+    OutputStreamBase(edge_layer, std::move(core_op_activated_event), status),
+    m_device(&device),
+    m_channel(std::move(channel)),
+    m_interface(interface),
+    is_stream_activated(false),
+    m_transfer_timeout(transfer_timeout),
+    m_max_batch_size(batch_size),
+    m_dynamic_batch_size(batch_size),
+    m_transfer_size(get_transfer_size(m_stream_info))
+{
+    // Check status for base class c'tor
+    if (HAILO_SUCCESS != status) {
+        return;
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+// D'tor - deactivates the stream if still active; failures are only logged.
+VdmaOutputStreamBase::~VdmaOutputStreamBase()
+{
+    // We want to stop the vdma channel before closing the stream in the firmware
+    // because sending data to a closed stream may terminate the dma engine
+    if (this->is_stream_activated) {
+        // Qualified call: virtual dispatch is unsafe in a destructor (derived part is gone).
+        const auto status = VdmaOutputStreamBase::deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
+        }
+    }
+}
+
+// Returns the stream interface this stream was created with (PCIE/INTEGRATED).
+hailo_stream_interface_t VdmaOutputStreamBase::get_interface() const
+{
+    return m_interface;
+}
+
+// Sets the per-transfer timeout; always succeeds.
+hailo_status VdmaOutputStreamBase::set_timeout(std::chrono::milliseconds timeout)
+{
+    this->m_transfer_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds VdmaOutputStreamBase::get_timeout() const
+{
+    return this->m_transfer_timeout;
+}
+
+// Aborts any blocking operation on the underlying channel.
+hailo_status VdmaOutputStreamBase::abort()
+{
+    return m_channel->abort();
+}
+
+hailo_status VdmaOutputStreamBase::clear_abort()
+{
+    return m_channel->clear_abort();
+}
+
+// Returns the dynamic batch size clamped to at least 1, so callers can safely use it as
+// a multiplier (presumably the IGNORE sentinel is 0 - confirm against control_protocol).
+uint16_t VdmaOutputStreamBase::get_dynamic_batch_size() const
+{
+    return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
+}
+
+// Returns the identifier of the device owning this stream's channel.
+const char* VdmaOutputStreamBase::get_dev_id() const
+{
+    return m_device->get_dev_id();
+}
+
+Expected<vdma::BoundaryChannel::BufferState> VdmaOutputStreamBase::get_buffer_state()
+{
+    return m_channel->get_buffer_state();
+}
+
+// Applies the requested dynamic batch size and activates the boundary channel with the
+// cached per-transfer size (bbox size for NMS, hw frame size otherwise - see get_transfer_size()).
+hailo_status VdmaOutputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+{
+    auto status = set_dynamic_batch_size(dynamic_batch_size);
+    CHECK_SUCCESS(status);
+
+    status = m_channel->activate(m_transfer_size, resume_pending_stream_transfers);
+    CHECK_SUCCESS(status);
+
+    this->is_stream_activated = true;
+
+    return HAILO_SUCCESS;
+}
+
+// Registers a callback invoked by the channel on transfer-complete interrupts.
+hailo_status VdmaOutputStreamBase::register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback)
+{
+    return m_channel->register_interrupt_callback(callback);
+}
+
+// Deactivates the stream by stopping the underlying channel (no flush - D2H has nothing to drain).
+// NOTE(review): a deactivate() failure is logged but HAILO_SUCCESS is still returned, unlike
+// VdmaInputStreamBase::deactivate_stream() which propagates the failure - confirm this
+// asymmetry is intentional.
+hailo_status VdmaOutputStreamBase::deactivate_stream()
+{
+    // Idempotent - a second call is a no-op.
+    if (!is_stream_activated) {
+        return HAILO_SUCCESS;
+    }
+
+    auto status = m_channel->deactivate();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to stop channel with status {}", status);
+    }
+
+    this->is_stream_activated = false;
+    return HAILO_SUCCESS;
+}
+
+// Per-transfer size for the D2H channel: a single bbox for NMS-ordered streams
+// (the device emits one bbox per vdma buffer), otherwise the full hw frame size.
+uint32_t VdmaOutputStreamBase::get_transfer_size(const hailo_stream_info_t &stream_info)
+{
+    // The ppu outputs one bbox per vdma buffer in the case of nms
+    return (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
+        stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
+}
+
+// Updates the dynamic batch size, bounded by the configured max batch size - identical in
+// logic to VdmaInputStreamBase::set_dynamic_batch_size().
+hailo_status VdmaOutputStreamBase::set_dynamic_batch_size(uint16_t dynamic_batch_size)
+{
+    // TODO: use std::max in the configure stage
+    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
+        // NOTE(review): the message says "value of dynamic_batch_size" but logs the member
+        // m_dynamic_batch_size, not the argument - presumably the argument was intended; confirm.
+        LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
+            "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
+        return HAILO_SUCCESS;
+    }
+
+    CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
+        "Dynamic batch size ({}) must be <= than the configured batch size ({})",
+        dynamic_batch_size, m_max_batch_size);
+
+    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
+        LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
+            "Leaving previously set value of {}", m_dynamic_batch_size);
+    } else {
+        LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
+        m_dynamic_batch_size = dynamic_batch_size;
+
+        const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Number of hw frames that fit in the channel's buffer.
+// Not available for NMS streams, whose frames are variable-sized.
+Expected<size_t> VdmaOutputStreamBase::get_buffer_frames_size() const
+{
+    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
+        // In NMS, each output frame has different size depending on the number of bboxes found for each class
+        // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
+        // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
+        LOGGER__INFO("NMS is not supported in function get_buffer_frames_size()");
+        return make_unexpected(HAILO_NOT_AVAILABLE);
+    }
+
+    return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
+}
+
+// Number of complete D2H frames already written by the device but not yet read by the host,
+// computed from the pending descriptor count and the descriptors-per-frame ratio.
+// Not available for NMS streams, whose frames are variable-sized.
+Expected<size_t> VdmaOutputStreamBase::get_pending_frames_count() const
+{
+    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
+        // In NMS, each output frame has different size depending on the number of bboxes found for each class
+        // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
+        // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
+        LOGGER__INFO("NMS is not supported in function get_pending_frames_count()");
+        return make_unexpected(HAILO_NOT_AVAILABLE);
+    }
+
+    auto pending_descs_count = m_channel->get_d2h_pending_descs_count();
+    CHECK_EXPECTED(pending_descs_count);
+
+    // descs_per_frame = ceil(hw_frame_size / page_size): a partially filled last page still
+    // occupies a whole descriptor.
+    auto channel_page_size = m_channel->get_page_size();
+    uint32_t descs_per_frame = (0 == (m_stream_info.hw_frame_size % channel_page_size)) ? (m_stream_info.hw_frame_size / channel_page_size) :
+        ((m_stream_info.hw_frame_size / channel_page_size) + 1);
+
+    // NOTE(review): if pending_descs_count.value() is integral, the division already truncates
+    // and std::floor is redundant (and goes through floating point) - confirm the value type.
+    return static_cast<size_t>(std::floor(pending_descs_count.value() / descs_per_frame));
+}
+
+} /* namespace hailort */
--- /dev/null
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file vdma_stream_base.hpp
+ * @brief Base class for stream objects over vDMA channel
+ **/
+
+#ifndef _HAILO_VDMA_STREAM_BASE_HPP_
+#define _HAILO_VDMA_STREAM_BASE_HPP_
+
+#include "hailo/hailort.h"
+#include "hailo/expected.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "vdma/vdma_device.hpp"
+#include "vdma/channel/boundary_channel.hpp"
+
+
+namespace hailort
+{
+constexpr std::chrono::seconds VDMA_FLUSH_TIMEOUT(10);
+
+// Common base for input (H2D) streams running over a vDMA boundary channel.
+// Owns the channel pointer and the batch/timeout configuration; concrete subclasses
+// (VdmaInputStream / VdmaAsyncInputStream) implement the actual write paths.
+class VdmaInputStreamBase : public InputStreamBase {
+public:
+    // Factory - picks the sync/async concrete subclass from stream_params.flags.
+    static Expected<std::shared_ptr<VdmaInputStreamBase>> create(hailo_stream_interface_t interface,
+        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+        const hailo_stream_parameters_t &stream_params, uint16_t batch_size, EventPtr core_op_activated_event);
+
+    virtual ~VdmaInputStreamBase();
+
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual hailo_status flush() override;
+    // Dynamic batch size clamped to >= 1.
+    uint16_t get_dynamic_batch_size() const;
+    const char* get_dev_id() const;
+    Expected<vdma::BoundaryChannel::BufferState> get_buffer_state();
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual Expected<size_t> get_pending_frames_count() const override;
+    virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback) override;
+
+protected:
+    VdmaInputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                        EventPtr core_op_activated_event, uint16_t batch_size,
+                        std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
+                        hailo_status &status);
+
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
+
+    VdmaDevice *m_device;                       // Non-owning; outlives the stream.
+    vdma::BoundaryChannelPtr m_channel;
+    const hailo_stream_interface_t m_interface;
+    bool is_stream_activated;
+    std::chrono::milliseconds m_channel_timeout;
+    const uint16_t m_max_batch_size;            // Upper bound for m_dynamic_batch_size.
+    uint16_t m_dynamic_batch_size;
+};
+
+// Common base for output (D2H) streams running over a vDMA boundary channel.
+// Mirrors VdmaInputStreamBase, plus a cached per-transfer size used on activation.
+class VdmaOutputStreamBase : public OutputStreamBase {
+public:
+    // Factory - picks the sync/async concrete subclass from stream_params.flags.
+    static Expected<std::shared_ptr<VdmaOutputStreamBase>> create(hailo_stream_interface_t interface,
+        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer, uint16_t batch_size,
+        const hailo_stream_parameters_t &stream_params, EventPtr core_op_activated_event);
+
+    virtual ~VdmaOutputStreamBase();
+
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    // Dynamic batch size clamped to >= 1.
+    uint16_t get_dynamic_batch_size() const;
+    const char* get_dev_id() const;
+    Expected<vdma::BoundaryChannel::BufferState> get_buffer_state();
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual Expected<size_t> get_pending_frames_count() const override;
+
+    virtual hailo_status register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback);
+
+protected:
+    VdmaOutputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+                         EventPtr core_op_activated_event, uint16_t batch_size,
+                         std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                         hailo_status &status);
+
+    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status deactivate_stream() override;
+    // Per-transfer size: bbox size for NMS order, hw frame size otherwise.
+    static uint32_t get_transfer_size(const hailo_stream_info_t &stream_info);
+    hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
+
+    VdmaDevice *m_device;                       // Non-owning; outlives the stream.
+    vdma::BoundaryChannelPtr m_channel;
+    const hailo_stream_interface_t m_interface;
+    bool is_stream_activated;
+    std::chrono::milliseconds m_transfer_timeout;
+    const uint16_t m_max_batch_size;            // Upper bound for m_dynamic_batch_size.
+    uint16_t m_dynamic_batch_size;
+    const uint32_t m_transfer_size;
+    // NOTE(review): the VdmaOutputStream subclass (vdma_stream.hpp) declares another
+    // m_read_mutex that shadows this one - confirm which lock the read paths use.
+    std::mutex m_read_mutex;
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_VDMA_STREAM_BASE_HPP_ */
+++ /dev/null
-#include "vdma_channel.hpp"
-#include "vdma_channel_regs.hpp"
-#include "hw_consts.hpp"
-#include "common/logger_macros.hpp"
-#include "common/utils.hpp"
-#include "vdma/sg_buffer.hpp"
-#include "vdma_descriptor_list.hpp"
-
-#include "hailo/hailort_common.hpp"
-
-#include <list>
-#include <chrono>
-#include <thread>
-
-#include <iostream>
-
-namespace hailort
-{
-
-#define FD_READ_SIZE (8)
-#define MIN_TIMEOUT_DDR (1000)
-
-/* PLDA descriptor control */
-#define PCIE_DESCRIPTOR_CONTROL_CLR(src)\
- src = (src & (~(uint32_t)0xFF))
-#define PCIE_DESCRIPTOR_CONTROL_SET_DESC_STATUS_REQ(src)\
- src = ((src) | 0x01)
-#define PCIE_DESCRIPTOR_CONTROL_SET_DESC_STATUS_REQ_ERR(src)\
- src = ((src) | 0x02)
-#define PCIE_DESCRIPTOR_CONTROL_SET_DESC_SET_IRQ_ON_ERROR(src)\
- src = ((src) | 0x04)
-#define PCIE_DESCRIPTOR_CONTROL_SET_DESC_SET_IRQ_ON_AXI_DOMAIN(src)\
- src = ((src) | 0x10)
-
-
-void VdmaChannel::State::lock()
-{
-#ifndef _MSC_VER
- int err = pthread_mutex_lock(&m_state_lock);
- if (0 != err) {
- LOGGER__ERROR("Failed destory vdma channel mutex, errno {}", err);
- assert(false);
- }
-#else
- EnterCriticalSection(&m_state_lock);
-#endif
-}
-
-void VdmaChannel::State::unlock()
-{
-#ifndef _MSC_VER
- int err = pthread_mutex_unlock(&m_state_lock);
- assert(0 == err);
- (void)err;
-#else
- LeaveCriticalSection(&m_state_lock);
-#endif
-}
-
-Expected<VdmaChannel> VdmaChannel::create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
- uint16_t requested_desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr)
-{
- CHECK_AS_EXPECTED(Direction::BOTH != direction, HAILO_INVALID_ARGUMENT);
-
- hailo_status status = HAILO_UNINITIALIZED;
- auto desc_page_size_value = driver.calc_desc_page_size(requested_desc_page_size);
- CHECK_AS_EXPECTED(is_powerof2(desc_page_size_value), HAILO_INVALID_ARGUMENT,
- "Descriptor page_size must be a power of two.");
- CHECK_AS_EXPECTED(channel_id.channel_index < VDMA_CHANNELS_PER_ENGINE, HAILO_INVALID_ARGUMENT,
- "Invalid DMA channel index {}", channel_id.channel_index);
- CHECK_AS_EXPECTED(channel_id.engine_index < driver.dma_engines_count(), HAILO_INVALID_ARGUMENT,
- "Invalid DMA engine index {}, max {}", channel_id.engine_index, driver.dma_engines_count());
-
- VdmaChannel object(channel_id, direction, driver, stream_name, latency_meter, desc_page_size_value,
- transfers_per_axi_intr, status);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed creating VdmaChannel");
- return make_unexpected(status);
- }
- return object;
-}
-
-VdmaChannel::VdmaChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
- const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t desc_page_size, uint16_t transfers_per_axi_intr,
- hailo_status &status)
- : m_d2h_callback_thread(nullptr), m_channel_id(channel_id),
- m_direction(direction), m_driver(driver),
- m_host_registers(driver, channel_id, direction),
- m_device_registers(driver, channel_id, other_direction(direction)),
- m_desc_page_size(desc_page_size),
- m_stream_name(stream_name), m_latency_meter(latency_meter), m_channel_enabled(false),
- m_transfers_per_axi_intr(transfers_per_axi_intr), m_pending_buffers_sizes(0), m_pending_num_avail_offset(0), m_is_waiting_for_channel_completion(false),
- m_is_aborted_by_internal_source(false)
-{
- if (m_transfers_per_axi_intr == 0) {
- LOGGER__ERROR("Invalid transfers per axi interrupt");
- status = HAILO_INVALID_ARGUMENT;
- return;
- }
-
- auto channel_handle_memory = MmapBuffer<HailoRTDriver::VdmaChannelHandle>::create_shared_memory(sizeof(HailoRTDriver::VdmaChannelHandle));
- if (!channel_handle_memory) {
- LOGGER__ERROR("Failed allocating shared memory for channel, err = {}", channel_handle_memory.status());
- status = channel_handle_memory.status();
- return;
- }
- m_channel_handle = channel_handle_memory.release();
- *m_channel_handle = HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE;
-
- // The fw activates the channel (in ResourcesManager::enable_state_machine)
- // The driver cleans the channel's state, in case the last shutdown wasn't successful.
- m_channel_enabled = true;
-
- status = HAILO_SUCCESS;
-}
-
-VdmaChannel::~VdmaChannel()
-{
- if (m_channel_enabled) {
- stop_channel();
- m_channel_enabled = false;
- if (Direction::H2D == m_direction) {
- m_can_write_buffer_cv.notify_all();
- } else {
- m_can_read_buffer_cv.notify_all();
- }
- }
-
- if (m_state) {
-#ifndef _MSC_VER
- int err = pthread_mutex_destroy(&m_state->m_state_lock);
- if (0 != err) {
- LOGGER__ERROR("Failed destory vdma channel mutex, errno {}", err);
- }
-#else
- DeleteCriticalSection(&m_state->m_state_lock);
-#endif
- }
-}
-
-VdmaChannel::VdmaChannel(VdmaChannel &&other) noexcept:
- m_d2h_callback_thread(std::move(other.m_d2h_callback_thread)),
- m_channel_id(std::move(other.m_channel_id)),
- m_direction(other.m_direction),
- m_driver(other.m_driver),
- m_host_registers(std::move(other.m_host_registers)),
- m_device_registers(std::move(other.m_device_registers)),
- m_desc_page_size(other.m_desc_page_size),
- m_buffer(std::move(other.m_buffer)),
- m_stream_name(std::move(other.m_stream_name)),
- m_latency_meter(std::move(other.m_latency_meter)),
- m_state(std::move(other.m_state)),
- m_channel_handle(std::move(other.m_channel_handle)),
- m_channel_enabled(std::exchange(other.m_channel_enabled, false)),
- m_transfers_per_axi_intr(std::move(other.m_transfers_per_axi_intr)),
- m_pending_buffers_sizes(std::move(other.m_pending_buffers_sizes)),
- m_pending_num_avail_offset(other.m_pending_num_avail_offset.exchange(0)),
- m_is_waiting_for_channel_completion(other.m_is_waiting_for_channel_completion.exchange(false)),
- m_is_aborted_by_internal_source(other.m_is_aborted_by_internal_source.exchange(false))
-{}
-
-hailo_status VdmaChannel::stop_channel()
-{
- if (!m_state) {
- const auto status = unregister_fw_controlled_channel();
- CHECK_SUCCESS(status, "Failed to disable channel {}", m_channel_id);
-
- } else {
- std::unique_lock<State> state_guard(*m_state);
- const auto status = unregister_fw_controlled_channel();
- CHECK_SUCCESS(status, "Failed to disable channel {}", m_channel_id);
-
- if (Direction::D2H == m_direction) {
- unregister_for_d2h_interrupts(state_guard);
- } else {
- if (m_state->m_should_reprogram_buffer || !m_pending_buffers_sizes.empty()) {
- // If we've already reprogrammed the buffer or there are pending buffers, we'll set m_previous_tail
- const auto curr_tail = CB_TAIL(m_state->m_descs);
- m_state->m_previous_tail = (curr_tail + m_state->m_previous_tail) & m_state->m_descs.size_mask;
- m_state->m_should_reprogram_buffer = true;
- }
- // For H2D channels we reset counters as we want to allow writes to the start of the buffer while the channel is stopped
- reset_internal_counters();
- }
-
- }
- return HAILO_SUCCESS;
-}
-
-uint16_t VdmaChannel::get_page_size()
-{
- return m_desc_page_size;
-}
-
-Expected<CONTROL_PROTOCOL__host_buffer_info_t> VdmaChannel::get_boundary_buffer_info(uint32_t transfer_size)
-{
- CHECK_AS_EXPECTED(m_buffer, HAILO_INVALID_OPERATION, "Cannot get host buffer before buffer is allocated");
- return m_buffer->get_host_buffer_info(transfer_size);
-}
-
-hailo_status VdmaChannel::abort()
-{
- {
- std::lock_guard<State> state_guard(*m_state);
- m_is_aborted_by_internal_source = true;
- }
-
- if (Direction::H2D == m_direction) {
- m_can_write_buffer_cv.notify_all();
- } else {
- m_can_read_buffer_cv.notify_all();
- }
- return m_driver.vdma_channel_abort(m_channel_id, *m_channel_handle);
-}
-
-hailo_status VdmaChannel::clear_abort()
-{
- auto status = m_driver.vdma_channel_clear_abort(m_channel_id, *m_channel_handle);
- {
- std::lock_guard<State> state_guard(*m_state);
- m_is_aborted_by_internal_source = false;
- }
- return status;
-}
-
-size_t VdmaChannel::get_transfers_count_in_buffer(size_t transfer_size)
-{
- const auto descs_in_transfer = m_buffer->descriptors_in_buffer(transfer_size);
- const auto descs_count = CB_SIZE(m_state->m_descs);
- return (descs_count - 1) / descs_in_transfer;
-}
-
-size_t VdmaChannel::get_buffer_size() const
-{
- assert(m_buffer);
- return m_buffer->size();
-}
-
-Expected<size_t> VdmaChannel::get_h2d_pending_frames_count()
-{
- return m_pending_buffers_sizes.size();
-}
-
-Expected<size_t> VdmaChannel::get_d2h_pending_descs_count()
-{
- assert(m_state);
-
- std::lock_guard<State> state_guard(*m_state);
-
- int num_proc = CB_TAIL(m_state->m_descs);
- int desc_num_ready = CB_PROG(m_state->m_descs, num_proc, m_state->m_d2h_read_desc_index);
-
- return desc_num_ready;
-}
-
-hailo_status VdmaChannel::prepare_d2h_pending_descriptors(uint32_t transfer_size)
-{
- assert(m_buffer);
-
- auto transfers_count_in_buffer = get_transfers_count_in_buffer(transfer_size);
- auto transfers_count = std::min(transfers_count_in_buffer,
- static_cast<size_t>(CB_SIZE(m_state->m_buffers) - 1));
-
- // on D2H no need for interrupt of first descriptor
- const auto first_desc_interrupts_domain = VdmaInterruptsDomain::NONE;
- for (uint32_t i = 0; i < transfers_count; i++) {
- /* Provide FW interrupt only in the end of the last transfer in the batch */
- auto last_desc_interrutps_domain =
- (static_cast<uint32_t>(m_transfers_per_axi_intr - 1) == (i % m_transfers_per_axi_intr)) ?
- VdmaInterruptsDomain::BOTH : VdmaInterruptsDomain::HOST;
- auto status = prepare_descriptors(transfer_size, first_desc_interrupts_domain, last_desc_interrutps_domain);
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("preparing descriptors failed because channel is not activated");
- return status;
- }
- CHECK_SUCCESS(status, "Failed prepare desc status={}", status);
- }
-
- /* We assume each output transfer is in the same size */
- m_state->m_accumulated_transfers += ((m_state->m_accumulated_transfers + transfers_count) % m_transfers_per_axi_intr);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::allocate_resources(uint32_t descs_count)
-{
- // TODO (HRT-3762) : Move channel's state to driver to avoid using shared memory
- auto state = MmapBuffer<VdmaChannel::State>::create_shared_memory(sizeof(VdmaChannel::State));
- CHECK_EXPECTED_AS_STATUS(state, "Failed to allocate channel's resources");
-
-#ifndef _MSC_VER
- // Make sharable mutex
- pthread_mutexattr_t mutex_attrs{};
- int err = pthread_mutexattr_init(&mutex_attrs);
- CHECK(0 == err, HAILO_INTERNAL_FAILURE, "pthread_mutexattr_init failed with {}", err);
-
- err = pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED);
- if (0 != err) {
- (void)pthread_mutexattr_destroy(&mutex_attrs);
- LOGGER__ERROR("pthread_mutexattr_setpshared failed with {}", err);
- return HAILO_INTERNAL_FAILURE;
- }
-
- err = pthread_mutex_init(&state.value()->m_state_lock, &mutex_attrs);
- if (0 != pthread_mutexattr_destroy(&mutex_attrs)) {
- LOGGER__ERROR("pthread_mutexattr_destroy failed");
- // continue
- }
- CHECK(0 == err, HAILO_INTERNAL_FAILURE, "Mutex init failed with {}", err);
-#else
- InitializeCriticalSection(&state.value()->m_state_lock);
-#endif
-
- m_state = state.release();
- m_pending_buffers_sizes = CircularArray<size_t>(descs_count);
-
- // If measuring latency, max_active_transfer is limited to 16 (see hailort_driver.hpp doc for further information)
- int pending_buffers_size = (nullptr == m_latency_meter) ? static_cast<int>(m_state->m_pending_buffers.size()) :
- (static_cast<int>(m_state->m_pending_buffers.size()) / 2);
-
- if (MAX_DESCS_COUNT < descs_count) {
- LOGGER__ERROR("Vdma channel descs_count mustn't be larger than {}", MAX_DESCS_COUNT);
- return HAILO_INVALID_ARGUMENT;
- }
-
- CB_INIT(m_state->m_descs, descs_count);
- CB_INIT(m_state->m_buffers, pending_buffers_size);
- m_state->m_previous_tail = 0;
- m_state->m_should_reprogram_buffer = false;
-
- // Allocate descriptor list (host side)
- auto status = allocate_buffer(descs_count * m_desc_page_size);
- CHECK_SUCCESS(status, "Failed to allocate vDMA buffer for channel transfer! status={}", status);
-
- clear_descriptor_list();
-
- return HAILO_SUCCESS;
-}
-
-void VdmaChannel::reset_internal_counters()
-{
- assert(m_state);
- CB_RESET(m_state->m_descs);
- CB_RESET(m_state->m_buffers);
- m_state->m_d2h_read_desc_index = 0;
- m_state->m_last_timestamp_num_processed = 0;
- m_state->m_accumulated_transfers = 0;
-}
-
-hailo_status VdmaChannel::complete_channel_activation(uint32_t transfer_size)
-{
- /* descriptor buffer must be allocated */
- assert(m_buffer);
- assert(m_state);
- std::lock_guard<State> state_guard(*m_state);
-
- CHECK(HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE != *m_channel_handle,
- HAILO_INTERNAL_FAILURE, "Vdma channel must be registered before activation");
-
- reset_internal_counters();
-
- if ((Direction::D2H == m_direction) && (transfer_size != 0)) {
- auto status = prepare_d2h_pending_descriptors(transfer_size);
- if (HAILO_SUCCESS != status) {
- stop_channel();
- }
- return status;
- }
-
- // We should have no active transfers now
- if (m_state->m_should_reprogram_buffer) {
- auto status = m_buffer->reprogram_buffer_offset(m_state->m_previous_tail * m_desc_page_size, m_channel_id.channel_index);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::register_fw_controlled_channel()
-{
- return register_channel_to_driver();
-}
-
-void VdmaChannel::notify_all()
-{
- {
- // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
- std::lock_guard<State> state_guard(*m_state);
- }
- m_can_write_buffer_cv.notify_all();
- m_can_read_buffer_cv.notify_all();
-}
-
-hailo_status VdmaChannel::register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback)
-{
- // This function has to be called after channel is started
- assert(!((m_d2h_callback_thread) && m_d2h_callback_thread->joinable()));
- m_d2h_callback_thread = make_unique_nothrow<std::thread>([this, callback]() {
- wait_d2h_callback(callback);
- });
- CHECK_NOT_NULL(m_d2h_callback_thread, HAILO_OUT_OF_HOST_MEMORY);
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::unregister_for_d2h_interrupts(std::unique_lock<State> &lock)
-{
- // This function has to be called after channel is stopped (after unregister_fw_controlled_channel is called)
- if ((m_d2h_callback_thread) && (m_d2h_callback_thread->joinable())) {
- // Let the channel finish processing interrupts
- lock.unlock();
- m_d2h_callback_thread->join();
- lock.lock();
- }
- return HAILO_SUCCESS;
-}
-
-void VdmaChannel::wait_d2h_callback(const std::function<void(uint32_t)> &callback)
-{
- assert(Direction::D2H == m_direction);
- if(!m_buffer) {
- LOGGER__ERROR("Wait called without allocating buffers");
- return;
- }
- while (true) {
- auto status = wait_for_channel_completion(HAILO_INFINITE_TIMEOUT, callback);
- if (HAILO_SUCCESS == status || (HAILO_STREAM_ABORTED_BY_USER == status)) {
- // Ignore HAILO_STREAM_ABORTED_BY_USER as we want to keep waiting for interrupts until channel is stopped
- continue;
- } else if (HAILO_STREAM_NOT_ACTIVATED == status) {
- // Finish gracefully
- return;
- } else {
- LOGGER__ERROR("wait_d2h_callback failed with status={}", status);
- return;
- }
- }
-}
-
-hailo_status VdmaChannel::wait(size_t buffer_size, std::chrono::milliseconds timeout)
-{
- if (!m_buffer) {
- LOGGER__ERROR("Wait called without allocating buffers");
- return HAILO_INVALID_OPERATION;
- }
-
- CHECK(buffer_size < m_buffer->size(), HAILO_INVALID_ARGUMENT,
- "Requested transfer size ({}) must be smaller than ({})", buffer_size, m_buffer->size());
-
- if ((Direction::D2H == m_direction) && ((m_d2h_callback_thread) && (m_d2h_callback_thread->joinable()))) {
- std::unique_lock<State> state_guard(*m_state);
- hailo_status status = HAILO_SUCCESS; // Best effort
- bool was_successful = m_can_read_buffer_cv.wait_for(state_guard, timeout, [this, buffer_size, &status] () {
- if ((!m_channel_enabled) || (m_is_aborted_by_internal_source)) {
- status = HAILO_STREAM_ABORTED_BY_USER;
- return true; // return true so that the wait will finish
- }
- return is_ready_for_transfer_d2h(buffer_size);
- });
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("wait_for in d2h wait was aborted!");
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- CHECK(was_successful, HAILO_TIMEOUT);
- return HAILO_SUCCESS;
- }
- auto is_ready_for_transfer = (Direction::H2D == m_direction) ?
- std::bind(&VdmaChannel::is_ready_for_transfer_h2d, this, buffer_size) :
- std::bind(&VdmaChannel::is_ready_for_transfer_d2h, this, buffer_size);
- return wait_for_condition(is_ready_for_transfer, timeout);
-}
-
-hailo_status VdmaChannel::transfer(void *buf, size_t count)
-{
- CHECK((nullptr != buf) && (0 < count), HAILO_INVALID_ARGUMENT);
- CHECK(nullptr != m_buffer, HAILO_INVALID_OPERATION, "Transfer called without allocating buffers");
-
- hailo_status status = HAILO_UNINITIALIZED;
- assert(m_state);
- std::lock_guard<State> state_guard(*m_state);
-
- if (m_is_aborted_by_internal_source) {
- LOGGER__INFO("Tried to write to aborted channel {}", m_channel_id);
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- if (Direction::H2D == m_direction) {
- status = transfer_h2d(buf, count);
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("Transfer failed because Channel {} is not activated", m_channel_id);
- return HAILO_STREAM_NOT_ACTIVATED;
- }
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Transfer failed for channel {}", m_channel_id);
- return status;
- }
- return HAILO_SUCCESS;
- } else {
- status = transfer_d2h(buf, count);
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("Transfer failed because Channel {} is not activated", m_channel_id);
- return HAILO_STREAM_NOT_ACTIVATED;
- }
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Transfer failed for channel {} status {}", m_channel_id, status);
- return status;
- }
- return HAILO_SUCCESS;
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::write_buffer_impl(const MemoryView &buffer)
-{
- CHECK(nullptr != m_buffer, HAILO_INVALID_OPERATION, "Transfer called without allocating buffers");
-
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(buffer.size());
- uint32_t desc_avail = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
-
- assert(CB_AVAIL(m_state->m_descs, desc_avail, CB_TAIL(m_state->m_descs)) >= static_cast<uint16_t>(desired_desc_num));
-
- /* Copy buffer into the PLDA data struct */
- auto offset = ((desc_avail + m_state->m_previous_tail) & m_state->m_descs.size_mask) * m_desc_page_size;
- auto status = m_buffer->write_cyclic(buffer.data(), buffer.size(), offset);
- CHECK_SUCCESS(status);
-
- m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset + desired_desc_num);
-
- CHECK(!m_pending_buffers_sizes.full(), HAILO_INVALID_OPERATION, "Cannot add more pending buffers!");
- m_pending_buffers_sizes.push_back(buffer.size());
- return HAILO_SUCCESS;
-}
-
-Expected<VdmaChannel::BufferState> VdmaChannel::get_buffer_state()
-{
- BufferState result;
- result.num_avail = static_cast<uint16_t>(CB_HEAD(m_state->m_descs));
- result.num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
- auto hw_num_avail = m_host_registers.get_num_available();
- CHECK_EXPECTED(hw_num_avail);
- result.hw_num_avail = hw_num_avail.release();
- auto hw_num_processed = get_hw_num_processed();
- CHECK_EXPECTED(hw_num_processed);
- result.hw_num_processed = hw_num_processed.release();
-
- // Get a snapshot of the buffer
- auto vdma_buffer_copy = Buffer::create(m_buffer->size());
- CHECK_EXPECTED(vdma_buffer_copy);
- // If this a D2H channel, we need to sync the vdma buffer so that we'll get an updated view of the buffer
- const auto sync_needed = Direction::D2H == m_direction;
- const auto status = m_buffer->read_cyclic(vdma_buffer_copy->data(), vdma_buffer_copy->size(), 0, sync_needed);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- for (size_t offset = 0; offset < vdma_buffer_copy->size(); offset += m_desc_page_size) {
- auto chunk = Buffer::create(vdma_buffer_copy->data() + offset, m_desc_page_size);
- CHECK_EXPECTED(chunk);
- const auto abs_index = offset / m_desc_page_size;
- const auto desc_num = (abs_index >= static_cast<uint16_t>(m_state->m_previous_tail)) ?
- abs_index - m_state->m_previous_tail :
- m_state->m_descs.size - m_state->m_previous_tail + abs_index;
- result.desc_buffer_pairing.emplace_back(static_cast<uint16_t>(desc_num), chunk.release());
- }
-
- return result;
-}
-
-hailo_status VdmaChannel::write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
- const std::function<bool()> &should_cancel)
-{
- assert(m_state);
- std::unique_lock<State> state_guard(*m_state);
-
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(buffer.size());
- hailo_status channel_completion_status = HAILO_SUCCESS;
- bool was_successful = m_can_write_buffer_cv.wait_for(state_guard, timeout, [this, desired_desc_num, timeout, &should_cancel,
- &state_guard, &channel_completion_status] () {
- if ((!m_channel_enabled) || (m_is_aborted_by_internal_source)) {
- return true;
- }
-
- if (should_cancel()) {
- channel_completion_status = HAILO_STREAM_ABORTED_BY_USER;
- return true;
- }
-
- // Limit writes to not surpass size of m_buffers
- int written_buffers_count = static_cast<int>(m_pending_buffers_sizes.size());
- int sent_buffers_count = CB_PROG(m_state->m_buffers, CB_HEAD(m_state->m_buffers), CB_TAIL(m_state->m_buffers));
- if (written_buffers_count + sent_buffers_count >= CB_SIZE(m_state->m_buffers)) {
- return false;
- }
-
- // TODO (HRT-7252): Clean this code
- while (true) {
- int buffers_head = CB_HEAD(m_state->m_buffers);
- int buffers_tail = CB_TAIL(m_state->m_buffers);
- bool has_space_in_buffers = CB_AVAIL(m_state->m_buffers, buffers_head, buffers_tail);
-
- uint32_t desc_avail = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
- int num_free = CB_AVAIL(m_state->m_descs, desc_avail, CB_TAIL(m_state->m_descs));
- bool has_desc_space = (num_free >= static_cast<uint16_t>(desired_desc_num));
-
- if (has_space_in_buffers && has_desc_space) {
- break;
- }
-
- if (HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE == *m_channel_handle) {
- return false;
- }
-
- state_guard.unlock();
- channel_completion_status = wait_for_channel_completion(timeout);
- state_guard.lock();
- if (HAILO_SUCCESS != channel_completion_status) {
- LOGGER__INFO("wait_for_channel_completion failed with status={}", channel_completion_status);
- return true;
- }
- }
-
- return true;
- });
- if ((!m_channel_enabled) || (m_is_aborted_by_internal_source) || (HAILO_STREAM_ABORTED_BY_USER == channel_completion_status)) {
- LOGGER__INFO("wait_for in write_buffer was aborted!");
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- CHECK(was_successful, HAILO_TIMEOUT, "Waiting for descriptors in write_buffer has reached a timeout!");
- if (HAILO_STREAM_ABORTED_BY_USER == channel_completion_status) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- CHECK_SUCCESS(channel_completion_status);
-
- return write_buffer_impl(buffer);
-}
-
-hailo_status VdmaChannel::send_pending_buffer_impl()
-{
- CHECK(!m_pending_buffers_sizes.empty(), HAILO_INVALID_OPERATION, "There are no pending buffers to send!");
- assert(m_buffer);
-
- // For h2d, only the host need to get transfer done interrupts
- VdmaInterruptsDomain last_desc_interrupts_domain = VdmaInterruptsDomain::HOST;
- // If we measure latency, we need interrupt on the first descriptor
- VdmaInterruptsDomain first_desc_interrupts_domain = (m_latency_meter != nullptr) ?
- VdmaInterruptsDomain::HOST : VdmaInterruptsDomain::NONE;
-
- auto status = prepare_descriptors(m_pending_buffers_sizes.front(), first_desc_interrupts_domain, last_desc_interrupts_domain);
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("sending pending buffer failed because stream is not activated");
- // Stream was aborted during transfer - reset pending buffers
- m_pending_num_avail_offset = 0;
- while (m_pending_buffers_sizes.size() > 0) {
- m_pending_buffers_sizes.pop_front();
- }
- return status;
- }
- CHECK_SUCCESS(status);
- m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
-
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(m_pending_buffers_sizes.front());
- m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset - desired_desc_num);
-
- m_pending_buffers_sizes.pop_front();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::send_pending_buffer()
-{
- {
- assert(m_state);
- assert(m_buffer);
- std::lock_guard<State> state_guard(*m_state);
-
- auto status = send_pending_buffer_impl();
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("stream is not activated");
- return HAILO_STREAM_NOT_ACTIVATED;
- } else {
- CHECK_SUCCESS(status);
- }
- }
- m_can_write_buffer_cv.notify_one();
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::sync_state(std::chrono::milliseconds timeout)
-{
- {
- std::lock_guard<State> state_guard(*m_state);
-
- // Make sure that only one thread is waiting for channel completion
- if (m_is_waiting_for_channel_completion) {
- return HAILO_SUCCESS;
- }
- }
- return wait_for_channel_completion(timeout);
-}
-
-hailo_status VdmaChannel::flush(const std::chrono::milliseconds &timeout)
-{
- assert(m_state);
-
- if (Direction::D2H == m_direction) {
- // We are not buffering user data
- return HAILO_SUCCESS;
- }
-
- if (!m_buffer) {
- LOGGER__ERROR("VdmaChannel::flush is called on a channel without allocated resources");
- return HAILO_INVALID_OPERATION;
- }
-
- return wait_for_condition([this] { return CB_HEAD(m_state->m_buffers) == CB_TAIL(m_state->m_buffers); }, timeout);
-}
-
-hailo_status VdmaChannel::transfer_h2d(void *buf, size_t count)
-{
- auto status = write_buffer_impl(MemoryView(buf, count));
- CHECK_SUCCESS(status);
-
- status = send_pending_buffer_impl();
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- return status;
- } else {
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::transfer_d2h(void *buf, size_t count)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- /* Provide FW interrupt only in the end of the last transfer in the batch */
- VdmaInterruptsDomain first_desc_interrupts_domain = VdmaInterruptsDomain::NONE;
- VdmaInterruptsDomain last_desc_interrupts_domain = (m_state->m_accumulated_transfers + 1 == m_transfers_per_axi_intr) ?
- VdmaInterruptsDomain::BOTH : VdmaInterruptsDomain::HOST;
-
- assert(m_state);
- assert(m_buffer);
-
- auto desired_desc_num = m_buffer->descriptors_in_buffer(count);
- assert(desired_desc_num <= MAX_DESCS_COUNT);
- int desc_num = static_cast<int>(desired_desc_num);
-
- int num_processes = CB_TAIL(m_state->m_descs);
- int num_ready = CB_PROG(m_state->m_descs, num_processes, m_state->m_d2h_read_desc_index);
- if (num_ready < desc_num) {
- return HAILO_OUT_OF_DESCRIPTORS;
- }
-
- size_t offset = m_state->m_d2h_read_desc_index * m_desc_page_size;
- status = m_buffer->read_cyclic(buf, count, offset);
- if (status != HAILO_SUCCESS) {
- return status;
- }
-
- m_state->m_d2h_read_desc_index = (m_state->m_d2h_read_desc_index + desc_num) & m_state->m_descs.size_mask;
-
- // prepare descriptors for next recv
- if (*m_channel_handle != HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE) {
- status = prepare_descriptors(count, first_desc_interrupts_domain, last_desc_interrupts_domain);
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- LOGGER__INFO("transfer d2h failed because stream is not activated");
- return status;
- }
- CHECK_SUCCESS(status);
- }
-
- m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
-
- return HAILO_SUCCESS;
-}
-
-uint16_t VdmaChannel::get_num_available()
-{
- assert(m_state);
-
- uint16_t num_available = (uint16_t)CB_HEAD(m_state->m_descs);
-
-#ifndef NDEBUG
- // Validate synchronization with HW
- auto hw_num_avail = m_host_registers.get_num_available();
- assert(hw_num_avail);
- // On case of channel aborted, the num_available is set to 0 (so we don't accept sync)
-
- auto is_aborted_exp = is_aborted();
- assert(is_aborted_exp);
-
- if ((HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE != *m_channel_handle) && !is_aborted_exp.value()) {
- assert(hw_num_avail.value() == num_available);
- }
-#endif
- return num_available;
-}
-
-Expected<uint16_t> VdmaChannel::get_hw_num_processed()
-{
- assert(m_state);
-
- auto hw_num_processed = m_host_registers.get_num_processed();
- CHECK_EXPECTED(hw_num_processed, "Fail to read vdma num processed register");
-
- // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if
- // m_desc.size < 0x10000 (the maximum desc size), the actual hw_num_processed is a number
- // between 1 and m_descs.size. Therefore the value can be m_descs.size, in this case we change it
- // to zero.
- return static_cast<uint16_t>(hw_num_processed.value() & m_state->m_descs.size_mask);
-}
-
-hailo_status VdmaChannel::set_num_avail_value(uint16_t new_value)
-{
- // TODO - HRT-7885 : add check in driver
- CHECK(*m_channel_handle != HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE, HAILO_STREAM_NOT_ACTIVATED,
- "Error, can't set num available when stream is not activated");
-
- auto status = m_host_registers.set_num_available(new_value);
- CHECK_SUCCESS(status, "Fail to write vdma num available register");
-
-#ifndef NDEBUG
- // Validate synchronization with HW
- auto hw_num_avail = m_host_registers.get_num_available();
- assert(hw_num_avail);
- assert(hw_num_avail.value() == new_value);
-#endif
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr)
-{
- CHECK(0 != transfers_per_axi_intr, HAILO_INVALID_ARGUMENT, "Invalid transfers per axi interrupt");
- m_transfers_per_axi_intr = transfers_per_axi_intr;
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::inc_num_available(uint16_t value)
-{
- assert(m_state);
-
- //TODO: validate that count is added.
- int num_available = get_num_available();
- int num_processed = CB_TAIL(m_state->m_descs);
- int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
- if (value > num_free) {
- return HAILO_OUT_OF_DESCRIPTORS;
- }
-
- CB_ENQUEUE(m_state->m_descs, value);
- num_available = (num_available + value) & m_state->m_descs.size_mask;
- return set_num_avail_value(static_cast<uint16_t>(num_available));
-}
-
-void VdmaChannel::add_pending_buffer(uint32_t first_desc, uint32_t last_desc)
-{
- assert(m_state);
-
- int head = CB_HEAD(m_state->m_buffers);
- int tail = CB_TAIL(m_state->m_buffers);
- if (!CB_AVAIL(m_state->m_buffers, head, tail)) {
- LOGGER__ERROR("no avail space");
- }
- m_state->m_pending_buffers[head].last_desc = last_desc;
- m_state->m_pending_buffers[head].latency_measure_desc = (m_direction == Direction::H2D) ? first_desc : last_desc;
- CB_ENQUEUE(m_state->m_buffers, 1);
-}
-
-VdmaChannel::Direction VdmaChannel::other_direction(Direction direction)
-{
- return (Direction::H2D == direction) ? Direction::D2H : Direction::H2D;
-}
-
-hailo_status VdmaChannel::unregister_fw_controlled_channel()
-{
- assert(m_channel_handle);
-
- if (m_state) {
- // m_state is locked from stop_channel
- m_state->m_channel_is_active = false;
- }
-
- if (HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE != *m_channel_handle) {
- auto status = m_driver.vdma_channel_disable(m_channel_id, *m_channel_handle);
- *m_channel_handle = HailoRTDriver::INVALID_VDMA_CHANNEL_HANDLE;
- CHECK_SUCCESS(status, "Failed to disable channel {}", m_channel_id);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaChannel::register_channel_to_driver()
-{
- const bool measure_latency = (nullptr != m_latency_meter);
- auto channel_handle = m_driver.vdma_channel_enable(m_channel_id, m_direction, measure_latency);
- CHECK_EXPECTED_AS_STATUS(channel_handle, "Failed to enable channel {}", m_channel_id);
-
- *m_channel_handle = channel_handle.release();
-
- if (m_state) {
- std::lock_guard<State> state_guard(*m_state);
- m_state->m_channel_is_active = true;
- }
-
- return HAILO_SUCCESS;
-}
-
-// TODO - HRT-6984 - move function inside desc list class as part of the ctor
-void VdmaChannel::clear_descriptor_list()
-{
- assert(m_buffer);
-
- size_t desc_number = m_buffer->descs_count();
- size_t page_size = m_buffer->desc_page_size();
- auto desc_list = m_buffer->get_desc_list();
-
- // Config Descriptors value in SG-List Host side
- for (uint32_t j = 0; j < desc_number; j++) {
- VdmaDescriptor &descInfo = (desc_list->get())[j];
- descInfo.PageSize_DescControl = static_cast<uint32_t>((page_size << 8) + 0x2);
- descInfo.RemainingPageSize_Status = 0x0;
- }
-}
-
-hailo_status VdmaChannel::allocate_buffer(const uint32_t buffer_size)
-{
- assert((buffer_size % m_desc_page_size) == 0);
- uint32_t desc_count = buffer_size / m_desc_page_size;
-
- if (m_buffer) {
- LOGGER__ERROR("m_buffer is not NULL");
- return HAILO_INVALID_OPERATION;
- }
-
- auto buffer = vdma::SgBuffer::create(m_driver, desc_count, m_desc_page_size, m_direction,
- m_channel_id.channel_index);
- CHECK_EXPECTED_AS_STATUS(buffer);
-
- m_buffer = make_unique_nothrow<vdma::SgBuffer>(buffer.release());
- CHECK_NOT_NULL(m_buffer, HAILO_OUT_OF_HOST_MEMORY);
-
- return HAILO_SUCCESS;
-}
-
-uint32_t VdmaChannel::calculate_buffer_size(const HailoRTDriver &driver, uint32_t transfer_size,
- uint32_t transfers_count, uint16_t requested_desc_page_size) {
- auto desc_page_size = driver.calc_desc_page_size(requested_desc_page_size);
- uint32_t descs_per_transfer = VdmaDescriptorList::descriptors_in_buffer(transfer_size, desc_page_size);
- uint32_t descs_count = descs_per_transfer * transfers_count;
-
- if (descs_count > MAX_DESCS_COUNT) {
- descs_count = MAX_DESCS_COUNT;
- }
- else if (descs_count < MIN_DESCS_COUNT) {
- descs_count = MIN_DESCS_COUNT;
- }
-
- return descs_count * desc_page_size;
-}
-
-hailo_status VdmaChannel::trigger_channel_completion(uint16_t hw_num_processed, const std::function<void(uint32_t)> &callback)
-{
- // NOTE: right now, we can retake the 'completion' descriptor for a new transfer before handling the interrupt.
- // we should have our own pointers indicating whats free instead of reading from HW.
- // TODO: consider calculating the last descriptor using the src_desc_avail and src_desc_proc instead of using
- // status?
- // TODO: we might free a pending buffer which we didn't get an interrupt for yet. we should still handle this
- // situation correctly.
-
- assert(m_state);
- assert(m_buffer);
- std::lock_guard<State> state_guard(*m_state);
-
- if (m_is_aborted_by_internal_source) {
- return HAILO_STREAM_ABORTED_BY_USER;
- }
-
- int processed_no = 0;
- int head = CB_HEAD(m_state->m_buffers);
- int tail = CB_TAIL(m_state->m_buffers);
- int prog = CB_PROG(m_state->m_buffers, head, tail);
- int last_tail = -1;
- auto channel_error = m_host_registers.get_channel_error();
- CHECK_EXPECTED_AS_STATUS(channel_error, "Fail to read vdma channel error register");
- CHECK(0 == channel_error.value(), HAILO_INTERNAL_FAILURE, "Vdma channel {} in error state {}", m_channel_id,
- channel_error.value());
-
- uint16_t last_num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
-
- for (; prog > 0; prog--) {
- uint16_t last_desc_index = static_cast<uint16_t>(m_state->m_pending_buffers[tail].last_desc);
- // Transfer is complete if its last descriptor is in [last_num_processed, hw_num_processed) or
- // the the buffer is empty (hw_num_processed == get_num_available())
- bool is_complete = is_desc_between(last_num_processed, hw_num_processed, last_desc_index) || (hw_num_processed == get_num_available());
-
-#ifndef NDEBUG
- auto status = (m_buffer->get_desc_list()->get())[last_desc_index].RemainingPageSize_Status & 0xFF;
- // Verify if a DMA Descriptor error occurred.
- if (status & 0x2) {
- LOGGER__ERROR("Error while processing descriptor {} of DMA {} on board {}.", last_desc_index, m_channel_id,
- m_driver.dev_path());
- return HAILO_INTERNAL_FAILURE;
- }
-
- // status is read after hw_num_processed, so we want is_complete -> (status == 1).
- assert(!is_complete || ((status & 0x1) == 1));
-#endif
-
- if (!is_complete) {
- break;
- }
-
- processed_no++;
- last_tail = tail;
- tail = ((tail + 1) & m_state->m_buffers.size_mask);
- }
-
- if (0 < processed_no) {
- // TODO: use a different macro instead?
- _CB_SET(m_state->m_descs.tail, (m_state->m_pending_buffers[last_tail].last_desc + 1) & m_state->m_descs.size_mask);
- CB_DEQUEUE(m_state->m_buffers, processed_no);
-
- if (Direction::H2D == m_direction) {
- m_can_write_buffer_cv.notify_one();
- } else {
- m_can_read_buffer_cv.notify_one();
- }
- callback(processed_no);
- }
-
- m_is_waiting_for_channel_completion = false;
- return HAILO_SUCCESS;
-}
-
-bool VdmaChannel::is_ready_for_transfer_h2d(size_t buffer_size)
-{
- assert(m_state);
- assert(m_buffer);
-
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(buffer_size);
- assert(desired_desc_num <= MAX_DESCS_COUNT);
- int desc_num = static_cast<int>(desired_desc_num);
-
- int buffers_head = CB_HEAD(m_state->m_buffers);
- int buffers_tail = CB_TAIL(m_state->m_buffers);
- if (!CB_AVAIL(m_state->m_buffers, buffers_head, buffers_tail)) {
- return false;
- }
-
- int num_available = get_num_available();
- int num_processed = CB_TAIL(m_state->m_descs);
-
- if (desc_num == m_state->m_descs.size) {
- // Special case when the checking if the buffer is empty
- return num_available == num_processed;
- }
-
- int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
- if (num_free < desc_num) {
- return false;
- }
-
- return true;
-}
-
-bool VdmaChannel::is_ready_for_transfer_d2h(size_t buffer_size)
-{
- assert(m_state);
- assert(m_buffer);
-
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(buffer_size);
- assert(desired_desc_num <= MAX_DESCS_COUNT);
- int desc_num = static_cast<int>(desired_desc_num);
-
- int buffers_head = CB_HEAD(m_state->m_buffers);
- int buffers_tail = CB_TAIL(m_state->m_buffers);
- if (!CB_AVAIL(m_state->m_buffers, buffers_head, buffers_tail)) {
- return false;
- }
-
- int num_processed = CB_TAIL(m_state->m_descs);
- int num_ready = CB_PROG(m_state->m_descs, num_processed, m_state->m_d2h_read_desc_index);
- if (num_ready < desc_num) {
- return false;
- }
- return true;
-}
-
-hailo_status VdmaChannel::prepare_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain)
-{
- assert(m_buffer);
- assert(m_state);
- auto desc_info = m_buffer->get_desc_list();
-
- /* calculate desired descriptors for the buffer */
- size_t desired_desc_num = m_buffer->descriptors_in_buffer(transfer_size);
- assert(desired_desc_num <= MAX_DESCS_COUNT);
- uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);
-
- int num_available = get_num_available();
- int num_processed = CB_TAIL(m_state->m_descs);
- int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
- if (num_free < desc_num) {
- return HAILO_OUT_OF_DESCRIPTORS;
- }
-
- auto actual_desc_count = desc_info->get().program_descriptors(transfer_size, first_desc_interrupts_domain,
- last_desc_interrupts_domain, num_available, true);
- if (!actual_desc_count) {
- LOGGER__ERROR("Failed to program desc_list for channel {}", m_channel_id);
- return actual_desc_count.status();
- }
- assert (actual_desc_count.value() == desc_num);
- int last_desc_avail = ((num_available + desc_num - 1) & m_state->m_descs.size_mask);
-
- add_pending_buffer(num_available, last_desc_avail);
- return inc_num_available(desc_num);
-}
-
-uint32_t VdmaChannel::calculate_descriptors_count(uint32_t buffer_size)
-{
- return VdmaDescriptorList::calculate_descriptors_count(buffer_size, 1, m_desc_page_size);
-}
-
-bool VdmaChannel::is_desc_between(uint16_t begin, uint16_t end, uint16_t desc)
-{
- if (begin == end) {
- // There is nothing between
- return false;
- }
- if (begin < end) {
- // desc needs to be in [begin, end)
- return (begin <= desc) && (desc < end);
- }
- else {
- // desc needs to be in [0, end) or [begin, m_state->m_descs.size()-1]
- return (desc < end) || (begin <= desc);
- }
-}
-
-Expected<bool> VdmaChannel::is_aborted()
-{
- // Checking if either src side or dst side of the channel are aborted
- auto host_control = m_host_registers.get_control();
- CHECK_EXPECTED(host_control, "Fail to read vdma control register");
- if (vdma_channel_control_is_aborted(host_control.value()) ||
- vdma_channel_control_is_paused(host_control.value())) {
- return true;
- }
-
- auto device_control = m_device_registers.get_control();
- CHECK_EXPECTED(device_control, "Fail to read vdma control register");
- if (vdma_channel_control_is_aborted(device_control.value()) ||
- vdma_channel_control_is_paused(device_control.value())) {
- return true;
- }
-
- return false;
-}
-
-hailo_status VdmaChannel::wait_for_condition(std::function<bool()> condition, std::chrono::milliseconds timeout)
-{
- auto start_time = std::chrono::steady_clock::now();
- std::chrono::milliseconds time_elapsed(0);
- while (timeout > time_elapsed) {
- if (condition()) {
- return HAILO_SUCCESS;
- }
-
- auto status = wait_for_channel_completion(timeout);
- if (HAILO_SUCCESS != status) {
- return status;
- }
-
- time_elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start_time);
- }
-
- return condition() ? HAILO_SUCCESS : HAILO_TIMEOUT;
-}
-
-hailo_status VdmaChannel::wait_for_channel_completion(std::chrono::milliseconds timeout, const std::function<void(uint32_t)> &callback)
-{
- auto hw_num_processed = wait_interrupts(timeout);
- if ((hw_num_processed.status() == HAILO_TIMEOUT) ||
- (hw_num_processed && hw_num_processed.value() == 0)) {
- // We need to check for channel abort in this 2 cases:
- // 1. TIMEOUT - maybe the timeout is a result of channel aborted.
- // 2. hw_num_processed == 0 - In this case we receive an interrupt, but the channel may be
- // aborted. When the channel is aborted, num processed is set to 0.
- auto is_aborted_exp = is_aborted();
- CHECK_EXPECTED_AS_STATUS(is_aborted_exp);
- if (is_aborted_exp.value()) {
- assert(m_state);
- std::lock_guard<State> state_guard(*m_state);
- if (!m_state->m_channel_is_active) {
- return HAILO_STREAM_NOT_ACTIVATED;
- }
-
- LOGGER__CRITICAL("Channel {} was aborted by an external source!", m_channel_id);
- return HAILO_STREAM_ABORTED;
- }
- }
- if ((HAILO_STREAM_ABORTED_BY_USER == hw_num_processed.status()) ||
- (HAILO_STREAM_NOT_ACTIVATED == hw_num_processed.status())) {
- return hw_num_processed.status();
- }
- CHECK_EXPECTED_AS_STATUS(hw_num_processed);
-
- auto status = trigger_channel_completion(hw_num_processed.value(), callback);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- return status;
- }
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-Expected<uint16_t> VdmaChannel::wait_interrupts(std::chrono::milliseconds timeout)
-{
- assert(m_state);
-
- auto irq_data = m_driver.wait_channel_interrupts(m_channel_id, *m_channel_handle, timeout);
- if ((HAILO_STREAM_ABORTED_BY_USER == irq_data.status()) ||
- (HAILO_STREAM_NOT_ACTIVATED == irq_data.status())) {
- LOGGER__INFO("Wait channel interrupts was aborted!");
- return make_unexpected(irq_data.status());
- }
- CHECK_EXPECTED(irq_data);
-
- if (m_latency_meter == nullptr) {
- return get_hw_num_processed();
- }
- else {
- // Fixing desc num_processed (it may be equal to m_state->m_descs.size, in this case we will make it zero)
- for (size_t i = 0; i < irq_data->count; i++) {
- irq_data->timestamp_list[i].desc_num_processed = static_cast<uint16_t>(
- irq_data->timestamp_list[i].desc_num_processed & m_state->m_descs.size_mask);
- }
- return update_latency_meter(irq_data.value());
- }
-}
-
-Expected<uint16_t> VdmaChannel::update_latency_meter(const ChannelInterruptTimestampList ×tamp_list)
-{
- assert(m_state);
-
- uint16_t last_num_processed = m_state->m_last_timestamp_num_processed;
-
- if (timestamp_list.count == 0) {
- // TODO: handle this in the driver level.
- return last_num_processed;
- }
-
- // TODO: now we have more iterations than we need. We know that the pending buffers + the timestamp list
- // are ordered. If pending_buffer[i] is not in any of the timestamps_list[0, 1, ... k], then also pending_buffer[i+1,i+2,...]
- // not in those timestamps
-
- int head = CB_HEAD(m_state->m_buffers);
- int tail = CB_TAIL(m_state->m_buffers);
- int prog = CB_PROG(m_state->m_buffers, head, tail);
-
- for (; prog > 0; prog--, tail = ((tail + 1) & m_state->m_buffers.size_mask)) {
- uint16_t latency_desc = static_cast<uint16_t>(m_state->m_pending_buffers[tail].latency_measure_desc);
- for (size_t i = 0; i < timestamp_list.count; i++) {
- const auto &irq_timestamp = timestamp_list.timestamp_list[i];
- if (is_desc_between(last_num_processed, irq_timestamp.desc_num_processed, latency_desc)) {
- if (m_direction == Direction::H2D) {
- m_latency_meter->add_start_sample(irq_timestamp.timestamp);
- }
- else {
- m_latency_meter->add_end_sample(m_stream_name, irq_timestamp.timestamp);
- }
- break;
- }
- }
- }
-
- m_state->m_last_timestamp_num_processed = timestamp_list.timestamp_list[timestamp_list.count-1].desc_num_processed;
- return std::move(static_cast<uint16_t>(m_state->m_last_timestamp_num_processed));
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_channel.hpp
- * @brief Cordinator of everything related to one channel of Vdma
- *
- * <doc>
- **/
-
-#ifndef _HAILO_VDMA_CHANNEL_HPP_
-#define _HAILO_VDMA_CHANNEL_HPP_
-
-#include "hailo/hailort.h"
-#include "common/circular_buffer.hpp"
-#include "common/latency_meter.hpp"
-#include "vdma_channel_regs.hpp"
-#include "hailo/expected.hpp"
-#include "os/hailort_driver.hpp"
-#include "vdma/sg_buffer.hpp"
-#include "vdma_descriptor_list.hpp"
-#include "vdma/channel_id.hpp"
-#include "hailo/buffer.hpp"
-
-#include <mutex>
-#include <array>
-#include <condition_variable>
-
-namespace hailort
-{
-
-class VdmaChannel final
-{
-public:
- using Direction = HailoRTDriver::DmaDirection;
-
- static Expected<VdmaChannel> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
- uint16_t requested_desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,
- uint16_t transfers_per_axi_intr = 1);
- ~VdmaChannel();
-
- /**
- * Waits until the channel is ready for transfer `buffer_size` bytes.
- * For now only supported in H2D stream.
- * TODO: SDK-15831 support D2H
- *
- * @param[in] buffer_size
- * @param[in] timeout
- */
- hailo_status wait(size_t buffer_size, std::chrono::milliseconds timeout);
-
- hailo_status transfer(void *buf, size_t count);
- // Either write_buffer + send_pending_buffer or transfer (h2d) should be used on a given channel, not both
- hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout, const std::function<bool()> &should_cancel);
- hailo_status send_pending_buffer();
- hailo_status trigger_channel_completion(uint16_t hw_num_processed, const std::function<void(uint32_t)> &callback);
- hailo_status allocate_resources(uint32_t descs_count);
- // Call for boundary channels, after the fw has activted them (via ResourcesManager::enable_state_machine)
- hailo_status complete_channel_activation(uint32_t transfer_size);
- // Libhailort registers the channels to the driver and the FW is responsible for opening and closing them
- hailo_status register_fw_controlled_channel();
- hailo_status unregister_fw_controlled_channel();
- // For D2H channels, we don't buffer data
- // Hence there's nothing to be "flushed" and the function will return with HAILO_SUCCESS
- hailo_status flush(const std::chrono::milliseconds &timeout);
- hailo_status set_num_avail_value(uint16_t new_value);
- hailo_status set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr);
- hailo_status inc_num_available_for_ddr(uint16_t value, uint32_t size_mask);
- Expected<uint16_t> get_hw_num_processed_ddr(uint32_t size_mask);
-
- hailo_status stop_channel();
- uint16_t get_page_size();
- Expected<CONTROL_PROTOCOL__host_buffer_info_t> get_boundary_buffer_info(uint32_t transfer_size);
-
- hailo_status abort();
- hailo_status clear_abort();
-
- class BufferState {
- public:
- std::vector<std::pair<uint16_t, Buffer>> desc_buffer_pairing;
- uint16_t num_avail;
- uint16_t num_processed;
- uint16_t hw_num_avail;
- uint16_t hw_num_processed;
- };
- // Assumes that the channel is idle; doesn't block changes to the channel
- // To be used for debugging purposes
- Expected<BufferState> get_buffer_state();
-
- // To be used for debugging purposes
- hailo_status sync_state(std::chrono::milliseconds timeout);
-
- vdma::ChannelId get_channel_id() const
- {
- return m_channel_id;
- }
-
- size_t get_transfers_count_in_buffer(size_t transfer_size);
- size_t get_buffer_size() const;
- Expected<size_t> get_h2d_pending_frames_count();
- Expected<size_t> get_d2h_pending_descs_count();
-
- VdmaChannel(const VdmaChannel &other) = delete;
- VdmaChannel &operator=(const VdmaChannel &other) = delete;
- VdmaChannel(VdmaChannel &&other) noexcept;
- VdmaChannel &operator=(VdmaChannel &&other) = delete;
-
- static uint32_t calculate_buffer_size(const HailoRTDriver &driver, uint32_t transfer_size, uint32_t transfers_count,
- uint16_t requested_desc_page_size);
-
- hailo_status register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback);
-
- void notify_all();
-
-private:
- struct PendingBuffer {
- uint32_t last_desc;
- uint32_t latency_measure_desc;
- };
-
- // TODO (HRT-3762) : Move channel's state to driver to avoid using shared memory
- class State {
- public:
-
- void lock();
- void unlock();
-
-#ifndef _MSC_VER
- pthread_mutex_t m_state_lock;
-#else
- CRITICAL_SECTION m_state_lock;
-#endif
- std::array<PendingBuffer, PENDING_BUFFERS_SIZE> m_pending_buffers;
- circbuf_t m_buffers;
- // TODO: describe why we must have our own num_available and num_proc.
- // it's not just for efficiency but its critical to avoid a potential bug - see Avigail email.
- // TODO: Consider C11 stdatomic
- circbuf_t m_descs;
- int m_d2h_read_desc_index;
- // TODO: We want to refactor this class + VdmaChannel so that logic related to write_buffer + send_pending_buffer will
- // be in another class.
- // Points to the tail of the desc list when the channel is stopped (starts at zero)
- // When calling VdmaChannel::write_buffer, buffers will be appended relative to this index (+ the current num_avail)
- // We'll set it if there are pending buffers to be sent or if m_should_reprogram_buffer is set
- int m_previous_tail;
- bool m_should_reprogram_buffer;
- // Contains the last num_processed of the last interrupt (only used on latency measurement)
- uint16_t m_last_timestamp_num_processed;
- size_t m_accumulated_transfers;
- bool m_channel_is_active;
- };
-
- hailo_status register_channel_to_driver();
- hailo_status unregister_for_d2h_interrupts(std::unique_lock<State> &lock);
-
- VdmaChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, const std::string &stream_name,
- LatencyMeterPtr latency_meter, uint16_t desc_page_size, uint16_t transfers_per_axi_intr, hailo_status &status);
-
- hailo_status allocate_buffer(const uint32_t buffer_size);
- void clear_descriptor_list();
- hailo_status release_buffer();
- static Direction other_direction(const Direction direction);
- hailo_status transfer_h2d(void *buf, size_t count);
- hailo_status write_buffer_impl(const MemoryView &buffer);
- hailo_status send_pending_buffer_impl();
- uint16_t get_num_available();
- Expected<uint16_t> get_hw_num_processed();
- void add_pending_buffer(uint32_t first_desc, uint32_t last_desc);
- hailo_status inc_num_available(uint16_t value);
- hailo_status transfer_d2h(void *buf, size_t count);
- bool is_ready_for_transfer_h2d(size_t buffer_size);
- bool is_ready_for_transfer_d2h(size_t buffer_size);
- hailo_status prepare_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain);
- hailo_status prepare_d2h_pending_descriptors(uint32_t transfer_size);
- void reset_internal_counters();
- hailo_status wait_for_channel_completion(std::chrono::milliseconds timeout, const std::function<void(uint32_t)> &callback = [](uint32_t) { return; });
-
- uint32_t calculate_descriptors_count(uint32_t buffer_size);
-
- hailo_status wait_for_condition(std::function<bool()> condition, std::chrono::milliseconds timeout);
-
- void wait_d2h_callback(const std::function<void(uint32_t)> &callback);
- std::unique_ptr<std::thread> m_d2h_callback_thread;
-
- /**
- * Returns the new hw num_processed of the irq
- */
- Expected<uint16_t> wait_interrupts(std::chrono::milliseconds timeout);
-
- /**
- * Returns the new hw num processed.
- */
- Expected<uint16_t> update_latency_meter(const ChannelInterruptTimestampList ×tamp_list);
- static bool is_desc_between(uint16_t begin, uint16_t end, uint16_t desc);
- Expected<bool> is_aborted();
-
- const vdma::ChannelId m_channel_id;
- Direction m_direction;
- HailoRTDriver &m_driver;
- VdmaChannelRegs m_host_registers;
- VdmaChannelRegs m_device_registers;
-
- // TODO: use m_descriptors_buffer.desc_page_size()
- const uint16_t m_desc_page_size;
-
- // TODO: remove the unique_ptr, instead allocate the buffer in the ctor (needs to move ddr channel to
- // other class)
- std::unique_ptr<vdma::SgBuffer> m_buffer;
- const std::string m_stream_name;
- LatencyMeterPtr m_latency_meter;
-
- MmapBuffer<State> m_state;
- // Unique channel handle, may be changed between registration to driver. This object is shared
- // because multiple processes can enable/disable vdma channel (which changes the channel)
- MmapBuffer<HailoRTDriver::VdmaChannelHandle> m_channel_handle;
-
- bool m_channel_enabled;
-
- uint16_t m_transfers_per_axi_intr;
- // Using CircularArray because it won't allocate or free memory wile pushing and poping. The fact that it is circural is not relevant here
- CircularArray<size_t> m_pending_buffers_sizes;
- std::atomic_uint16_t m_pending_num_avail_offset;
- std::condition_variable_any m_can_write_buffer_cv;
- std::condition_variable_any m_can_read_buffer_cv;
- std::atomic_bool m_is_waiting_for_channel_completion;
- std::atomic_bool m_is_aborted_by_internal_source;
-};
-
-} /* namespace hailort */
-
-#endif // _HAILO_VDMA_CHANNEL_HPP_
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_channel_regs.hpp
- * @brief utilties used to parse/modify PLDA Vdma channel registers
- **/
-
-#ifndef _HAILO_VDMA_CHANNEL__REGS_HPP_
-#define _HAILO_VDMA_CHANNEL__REGS_HPP_
-
-#include "hw_consts.hpp"
-#include "hailo/expected.hpp"
-#include "os/hailort_driver.hpp"
-
-#include <cstdint>
-
-namespace hailort
-{
-
-#define DESCPRIPTOR_LIST_MAX_DEPTH (16)
-
-inline bool vdma_channel_control_is_aborted(uint8_t control_reg)
-{
- return (control_reg & 1) == 0;
-}
-
-inline bool vdma_channel_control_is_paused(uint8_t control_reg)
-{
- return (control_reg & 2) == 2;
-}
-
-class VdmaChannelRegs final {
-public:
- VdmaChannelRegs(HailoRTDriver &driver, vdma::ChannelId channel_id, HailoRTDriver::DmaDirection direction) :
- m_driver(driver),
- m_channel_id(channel_id),
- m_direction(direction)
- {}
-
- Expected<uint8_t> get_control()
- {
- return read_integer<uint8_t>(VDMA_CHANNEL_CONTROL_OFFSET);
- }
-
- Expected<uint16_t> get_num_available()
- {
- return read_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET);
- }
-
- hailo_status set_num_available(uint16_t value)
- {
- return write_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET, value);
- }
-
- Expected<uint16_t> get_num_processed()
- {
- return read_integer<uint16_t>(VDMA_CHANNEL_NUM_PROC_OFFSET);
- }
-
- Expected<uint8_t> get_channel_error()
- {
- return read_integer<uint8_t>(VDMA_CHANNEL_ERROR_OFFSET);
- }
-
- hailo_status stop_channel()
- {
- auto reg_control = get_control();
- CHECK_EXPECTED_AS_STATUS(reg_control, "Fail to read vdma control register");
-
- // First pause channel
- auto status = set_control((reg_control.value() & 0xFC) | 0x3);
- CHECK_SUCCESS(status, "Fail to write vdma control register");
-
- std::this_thread::sleep_for(std::chrono::microseconds(2));
-
- // Then abort
- status = set_control((reg_control.value() & 0xFC) | 0x0);
- CHECK_SUCCESS(status, "Fail to write vdma control register");
-
- return HAILO_SUCCESS;
- }
-
-private:
-
- template<typename IntegerType>
- Expected<IntegerType> read_integer(uint32_t offset)
- {
- auto value = m_driver.read_vdma_channel_register(m_channel_id, m_direction, offset, sizeof(IntegerType));
- CHECK_EXPECTED(value);
- return static_cast<IntegerType>(value.release());
- }
-
- hailo_status set_control(uint8_t value)
- {
- return write_integer<uint8_t>(VDMA_CHANNEL_CONTROL_OFFSET, value);
- }
-
- template<typename IntegerType>
- hailo_status write_integer(uint32_t offset, IntegerType value)
- {
- return m_driver.write_vdma_channel_register(m_channel_id, m_direction, offset, sizeof(value), value);
- }
-
- HailoRTDriver &m_driver;
- const vdma::ChannelId m_channel_id;
- const HailoRTDriver::DmaDirection m_direction;
-};
-
-} /* namespace hailort */
-
-#endif /*_HAILO_VDMA_CHANNEL__REGS_HPP_ */
\ No newline at end of file
+++ /dev/null
-#include "vdma_descriptor_list.hpp"
-
-#define DESC_STATUS_REQ (1 << 0)
-#define DESC_STATUS_REQ_ERR (1 << 1)
-#define DESC_REQUREST_IRQ_PROCESSED (1 << 2)
-#define DESC_REQUREST_IRQ_ERR (1 << 3)
-
-#define PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
-#define PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
-
-#define DRAM_DMA_HOST_INTERRUPTS_BITMASK (1 << 4)
-#define DRAM_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 5)
-
-#define DESC_PAGE_SIZE_SHIFT (8)
-#define DESC_PAGE_SIZE_MASK (0xFFFFFF00)
-#define DESC_IRQ_MASK (0x0000003C)
-
-namespace hailort
-{
-
-Expected<VdmaDescriptorList> VdmaDescriptorList::create(uint32_t desc_count, uint16_t requested_desc_page_size,
- HailoRTDriver &driver)
-{
- hailo_status status = HAILO_UNINITIALIZED;
- auto desc_page_size_value = driver.calc_desc_page_size(requested_desc_page_size);
- VdmaDescriptorList object(desc_count, driver, desc_page_size_value, status);
- if (HAILO_SUCCESS != status) {
- return make_unexpected(status);
- }
-
- return object;
-}
-
-VdmaDescriptorList::VdmaDescriptorList(uint32_t desc_count, HailoRTDriver &driver, uint16_t desc_page_size,
- hailo_status &status) :
- m_mapped_list(),
- m_count(desc_count),
- m_depth(0),
- m_desc_handle(0),
- m_dma_address(0),
- m_driver(driver),
- m_desc_page_size(desc_page_size)
-{
- if (!is_powerof2(desc_count)) {
- LOGGER__ERROR("Descriptor count ({}) must be power of 2", desc_count);
- status = HAILO_INVALID_ARGUMENT;
- return;
- }
-
- auto depth = calculate_desc_list_depth(desc_count);
- if (!depth) {
- status = depth.status();
- return;
- }
- m_depth = depth.value();
-
- auto desc_handle_phys_addr_pair = m_driver.descriptors_list_create(desc_count);
- if (!desc_handle_phys_addr_pair) {
- status = desc_handle_phys_addr_pair.status();
- return;
- }
-
- m_desc_handle = desc_handle_phys_addr_pair->first;
- m_dma_address = desc_handle_phys_addr_pair->second;
-
- auto mapped_list = MmapBuffer<VdmaDescriptor>::create_file_map(desc_count * sizeof(VdmaDescriptor), m_driver.fd(), m_desc_handle);
- if (!mapped_list) {
- LOGGER__ERROR("Failed to memory map descriptors. desc handle: {:X}", m_desc_handle);
- status = mapped_list.status();
- return;
- }
-
- m_mapped_list = mapped_list.release();
- status = HAILO_SUCCESS;
-}
-
-VdmaDescriptorList::~VdmaDescriptorList()
-{
- if (HAILO_SUCCESS != m_mapped_list.unmap()) {
- LOGGER__ERROR("Failed to release descriptors mapping");
- }
-
- // Note: The descriptors_list is freed by the desc_handle (no need to use the phys_address to free)
- if (0 != m_desc_handle) {
- if(HAILO_SUCCESS != m_driver.descriptors_list_release(m_desc_handle)) {
- LOGGER__ERROR("Failed to release descriptor list {}", m_desc_handle);
- }
- }
-}
-
-VdmaDescriptorList::VdmaDescriptorList(VdmaDescriptorList &&other) noexcept :
- m_mapped_list(std::move(other.m_mapped_list)),
- m_count(std::move(other.m_count)),
- m_depth(std::move(other.m_depth)),
- m_desc_handle(std::exchange(other.m_desc_handle, 0)),
- m_dma_address(std::exchange(other.m_dma_address, 0)),
- m_driver(other.m_driver),
- m_desc_page_size(other.m_desc_page_size) {}
-
-Expected<uint8_t> VdmaDescriptorList::calculate_desc_list_depth(size_t count)
-{
- // Calculate log2 of m_count (by finding the offset of the MSB)
- uint32_t depth = 0;
- while (count >>= 1) {
- ++depth;
- }
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(depth), HAILO_INTERNAL_FAILURE, "Calculated desc_list_depth is too big: {}", depth);
- return static_cast<uint8_t>(depth);
-}
-
-hailo_status VdmaDescriptorList::configure_to_use_buffer(vdma::MappedBuffer& buffer, uint8_t channel_index, size_t offset)
-{
- return m_driver.descriptors_list_bind_vdma_buffer(m_desc_handle, buffer.handle(), m_desc_page_size,
- channel_index, offset);
-}
-
-hailo_status VdmaDescriptorList::configure_to_use_buffer(vdma::MappedBuffer& buffer, size_t offset)
-{
- return configure_to_use_buffer(buffer, HailoRTDriver::INVALID_VDMA_CHANNEL_INDEX, offset);
-}
-
-Expected<uint16_t> VdmaDescriptorList::program_descriptors(size_t transfer_size,
- VdmaInterruptsDomain first_desc_interrupts_domain, VdmaInterruptsDomain last_desc_interrupts_domain,
- size_t desc_offset, bool is_circular)
-{
- assert(transfer_size > 0);
- const auto required_descriptors = descriptors_in_buffer(transfer_size);
- // Required_descriptors + desc_offset can't reach m_count. We need to keep at least 1 free desc at all time.
- if ((!is_circular) && ((required_descriptors + desc_offset) >= m_count)){
- LOGGER__ERROR("Requested transfer size ({}) result in more descriptors than available ({})", transfer_size, m_count);
- return make_unexpected(HAILO_OUT_OF_DESCRIPTORS);
- }
-
- size_t desc_index = desc_offset;
- for (size_t i = 0; i < required_descriptors - 1; ++i) {
- const auto interrupts_domain = (i == 0) ? first_desc_interrupts_domain : VdmaInterruptsDomain::NONE;
- program_single_descriptor((*this)[desc_index], m_desc_page_size, interrupts_domain);
- desc_index = (desc_index + 1) & (m_count - 1);
- }
-
- /* write residue page with the remaining buffer size*/
- auto resuide = transfer_size - (required_descriptors - 1) * m_desc_page_size;
- assert(IS_FIT_IN_UINT16(resuide));
- program_single_descriptor((*this)[desc_index], static_cast<uint16_t>(resuide), last_desc_interrupts_domain);
-
- return std::move(static_cast<uint16_t>(required_descriptors));
-}
-
-hailo_status VdmaDescriptorList::reprogram_descriptor_interrupts_domain(size_t desc_index,
- VdmaInterruptsDomain interrupts_domain)
-{
- if (desc_index >= m_count){
- LOGGER__ERROR("Requested desc (index={}) exceeds the number of descriptors in the list ({})", desc_index, m_count);
- return HAILO_OUT_OF_DESCRIPTORS;
- }
- reprogram_single_descriptor_interrupts_domain((*this)[desc_index], interrupts_domain);
- return HAILO_SUCCESS;
-}
-
-uint32_t VdmaDescriptorList::descriptors_in_buffer(size_t buffer_size) const
-{
- return descriptors_in_buffer(buffer_size, m_desc_page_size);
-}
-
-uint32_t VdmaDescriptorList::descriptors_in_buffer(size_t buffer_size, uint16_t desc_page_size)
-{
- assert(buffer_size < std::numeric_limits<uint32_t>::max());
- return static_cast<uint32_t>(DESCRIPTORS_IN_BUFFER(buffer_size, desc_page_size));
-}
-
-uint32_t VdmaDescriptorList::calculate_descriptors_count(uint32_t buffer_size, uint16_t batch_size, uint16_t desc_page_size)
-{
- // Because we use cyclic buffer, the amount of active descs is lower by one that the amount
- // of descs given (Otherwise we won't be able to determine if the buffer is empty or full).
- // Therefore we add 1 in order to compensate.
- uint32_t descs_count = std::min(((descriptors_in_buffer(buffer_size, desc_page_size) * batch_size) + 1),
- MAX_DESCS_COUNT);
-
- return get_nearest_powerof_2(descs_count, MIN_DESCS_COUNT);
-}
-
-Expected<std::pair<uint16_t, uint32_t>> VdmaDescriptorList::get_desc_buffer_sizes_for_single_transfer(
- const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size)
-{
- // Note: If the pages pointed to by the descriptors are copied in their entirety, then DEFAULT_DESC_PAGE_SIZE
- // is the optimal value. For transfer_sizes smaller than DEFAULT_DESC_PAGE_SIZE using smaller descriptor page
- // sizes will save memory consuption without harming performance. In the case of nms for example, only one bbox
- // is copied from each page. Hence, we'll use MIN_DESC_PAGE_SIZE for nms.
- const uint32_t initial_desc_page_size = (DEFAULT_DESC_PAGE_SIZE > transfer_size) ?
- get_nearest_powerof_2(transfer_size, MIN_DESC_PAGE_SIZE) : DEFAULT_DESC_PAGE_SIZE;
- if (DEFAULT_DESC_PAGE_SIZE != initial_desc_page_size) {
- LOGGER__INFO("Using non-default initial_desc_page_size of {}, due to a small transfer size ({})",
- initial_desc_page_size, transfer_size);
- }
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(initial_desc_page_size), HAILO_INTERNAL_FAILURE,
- "Descriptor page size needs to fit in 16B");
-
- return get_desc_buffer_sizes_for_single_transfer_impl(driver, min_batch_size, max_batch_size, transfer_size,
- static_cast<uint16_t>(initial_desc_page_size));
-}
-
-Expected<std::pair<uint16_t, uint32_t>> VdmaDescriptorList::get_desc_buffer_sizes_for_multiple_transfers(
- const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes)
-{
- return get_desc_buffer_sizes_for_multiple_transfers_impl(driver, batch_size, transfer_sizes,
- DEFAULT_DESC_PAGE_SIZE);
-}
-
-Expected<std::pair<uint16_t, uint32_t>> VdmaDescriptorList::get_desc_buffer_sizes_for_single_transfer_impl(
- const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size,
- uint16_t initial_desc_page_size)
-{
- auto results = VdmaDescriptorList::get_desc_buffer_sizes_for_multiple_transfers_impl(driver, min_batch_size,
- {transfer_size}, initial_desc_page_size);
- CHECK_EXPECTED(results);
-
- auto page_size = results->first;
-
- auto desc_count = std::min(MAX_DESCS_COUNT,
- VdmaDescriptorList::calculate_descriptors_count(transfer_size, max_batch_size, page_size));
-
- return std::make_pair(page_size, desc_count);
-}
-
-Expected<std::pair<uint16_t, uint32_t>> VdmaDescriptorList::get_desc_buffer_sizes_for_multiple_transfers_impl(
- const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes,
- uint16_t initial_desc_page_size)
-{
- const uint16_t min_desc_page_size = driver.calc_desc_page_size(MIN_DESC_PAGE_SIZE);
- const uint16_t max_desc_page_size = driver.calc_desc_page_size(MAX_DESC_PAGE_SIZE);
- // Defined as uint32_t to prevent overflow (as we multiply it by two in each iteration of the while loop bellow)
- uint32_t local_desc_page_size = driver.calc_desc_page_size(initial_desc_page_size);
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(local_desc_page_size), HAILO_INTERNAL_FAILURE,
- "Descriptor page size needs to fit in 16B");
- CHECK_AS_EXPECTED(local_desc_page_size <= max_desc_page_size, HAILO_INTERNAL_FAILURE,
- "Initial descriptor page size ({}) is larger than maximum descriptor page size ({})",
- local_desc_page_size, max_desc_page_size);
- CHECK_AS_EXPECTED(local_desc_page_size >= min_desc_page_size, HAILO_INTERNAL_FAILURE,
- "Initial descriptor page size ({}) is smaller than minimum descriptor page size ({})",
- local_desc_page_size, min_desc_page_size);
-
- uint32_t acc_desc_count = 0;
- for (const auto &transfer_size : transfer_sizes) {
- acc_desc_count +=
- VdmaDescriptorList::descriptors_in_buffer(transfer_size, static_cast<uint16_t>(local_desc_page_size));
- }
-
- // Too many descriptors; try a larger desc_page_size which will lead to less descriptors used
- while ((acc_desc_count * batch_size) > (MAX_DESCS_COUNT - 1)) {
- local_desc_page_size <<= 1;
-
- CHECK_AS_EXPECTED(local_desc_page_size <= max_desc_page_size, HAILO_OUT_OF_DESCRIPTORS,
- "Network shapes and batch size exceeds driver descriptors capabilities. "
- "Required descriptors count: {}, max allowed on the driver: {}. (A common cause for this error could be the"
- "Batch size - which is {}).",
- (batch_size * acc_desc_count), MAX_DESCS_COUNT, batch_size);
-
- CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(local_desc_page_size), HAILO_INTERNAL_FAILURE,
- "Descriptor page size needs to fit in 16B");
-
- acc_desc_count = 0;
- for (auto &transfer_size : transfer_sizes) {
- acc_desc_count +=
- VdmaDescriptorList::descriptors_in_buffer(transfer_size, static_cast<uint16_t>(local_desc_page_size));
- }
- }
-
- // Found desc_page_size and acc_desc_count
- const auto desc_page_size = static_cast<uint16_t>(local_desc_page_size);
-
- // Find descs_count
- const auto descs_count = get_nearest_powerof_2(acc_desc_count, MIN_DESCS_COUNT);
- CHECK_AS_EXPECTED(descs_count <= MAX_DESCS_COUNT, HAILO_OUT_OF_DESCRIPTORS);
-
- if (initial_desc_page_size != desc_page_size) {
- LOGGER__WARNING("Desc page size value ({}) is not optimal for performance.", desc_page_size);
- }
-
- return std::make_pair(desc_page_size, descs_count);
-}
-
-uint32_t VdmaDescriptorList::get_interrupts_bitmask(VdmaInterruptsDomain interrupts_domain)
-{
- uint32_t host_bitmask = 0;
- uint32_t device_bitmask = 0;
-
- switch (m_driver.dma_type()) {
- case HailoRTDriver::DmaType::PCIE:
- host_bitmask = PCIE_DMA_HOST_INTERRUPTS_BITMASK;
- device_bitmask = PCIE_DMA_DEVICE_INTERRUPTS_BITMASK;
- break;
- case HailoRTDriver::DmaType::DRAM:
- host_bitmask = DRAM_DMA_HOST_INTERRUPTS_BITMASK;
- device_bitmask = DRAM_DMA_DEVICE_INTERRUPTS_BITMASK;
- break;
- default:
- assert(false);
- }
-
- uint32_t bitmask = 0;
- if (host_interuptes_enabled(interrupts_domain)) {
- bitmask |= host_bitmask;
- }
- if (device_interuptes_enabled(interrupts_domain)) {
- bitmask |= device_bitmask;
- }
-
- return bitmask;
-}
-
-void VdmaDescriptorList::program_single_descriptor(VdmaDescriptor &descriptor, uint16_t page_size,
- VdmaInterruptsDomain interrupts_domain)
-{
- descriptor.PageSize_DescControl = 0;
- // Update the descriptor's PAGE_SIZE field in the control register with the maximum size of the DMA page.
- descriptor.PageSize_DescControl |=
- (uint32_t)(page_size << DESC_PAGE_SIZE_SHIFT) & (uint32_t)DESC_PAGE_SIZE_MASK;
-
- if (VdmaInterruptsDomain::NONE != interrupts_domain) {
- // Update the desc_control
- descriptor.PageSize_DescControl |= (DESC_REQUREST_IRQ_PROCESSED | DESC_REQUREST_IRQ_ERR |
- get_interrupts_bitmask(interrupts_domain));
-#ifndef NDEBUG
- descriptor.PageSize_DescControl |= (DESC_STATUS_REQ | DESC_STATUS_REQ_ERR);
-#endif
- }
-
- // Clear status
- descriptor.RemainingPageSize_Status = 0;
-}
-
-void VdmaDescriptorList::reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor,
- VdmaInterruptsDomain interrupts_domain)
-{
- // Set the IRQ control bits to zero
- descriptor.PageSize_DescControl &= ~DESC_IRQ_MASK;
-
- if (VdmaInterruptsDomain::NONE == interrupts_domain) {
- // Nothing else to do
- return;
- }
-
- descriptor.PageSize_DescControl |= (DESC_REQUREST_IRQ_PROCESSED | DESC_REQUREST_IRQ_ERR |
- get_interrupts_bitmask(interrupts_domain));
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_descriptor_list.hpp
- * @brief Allocates a list of buffer descriptors used for VDMA
- *
- **/
-
-#ifndef _HAILO_VDMA_DESCRIPTOR_LIST_HPP_
-#define _HAILO_VDMA_DESCRIPTOR_LIST_HPP_
-
-#include "os/hailort_driver.hpp"
-#include "hailo/expected.hpp"
-#include "os/mmap_buffer.hpp"
-#include "vdma/mapped_buffer.hpp"
-#include "common/utils.hpp"
-
-namespace hailort
-{
-
-
-#define MAX_DESCS_COUNT (64 * 1024u)
-#define MIN_DESCS_COUNT (2u)
-#define DEFAULT_DESC_COUNT (64 * 1024u)
-
-static_assert(is_powerof2(MAX_DESCS_COUNT), "MAX_DESCS_COUNT must be a power of 2");
-static_assert(is_powerof2(MIN_DESCS_COUNT), "MIN_DESCS_COUNT must be a power of 2");
-static_assert(is_powerof2(DEFAULT_DESC_COUNT), "DEFAULT_DESC_COUNT must be a power of 2");
-static_assert(DEFAULT_DESC_COUNT <= MAX_DESCS_COUNT && DEFAULT_DESC_COUNT >= MIN_DESCS_COUNT,
- "DEFAULT_DESC_COUNT not in range");
-
-// From PLDA's vDMA controller reference:
-// - Addresses of pages pointed to by vDMA descriptors need to be on a 64B boundry.
-// Hence, we require a minimum page size of 64B.
-// - G_PAGE_SIZE_MAX dictates the maximum desc page size:
-// max_page_size = 2 ^ (G_PAGE_SIZE_MAX - 1)
-// In our case max_page_size = 2 ^ (13 - 1) = 4096
-#define MIN_DESC_PAGE_SIZE (64u)
-// TODO: Calculate from G_PAGE_SIZE_MAX (I.e. read the reg etc.)
-#define MAX_DESC_PAGE_SIZE (4096u)
-#define DEFAULT_DESC_PAGE_SIZE (512u)
-
-static_assert(is_powerof2(MIN_DESC_PAGE_SIZE), "MIN_DESC_PAGE_SIZE must be a power of 2");
-static_assert(MIN_DESC_PAGE_SIZE > 0, "MIN_DESC_PAGE_SIZE must be larger then 0");
-static_assert(is_powerof2(MAX_DESC_PAGE_SIZE), "MAX_DESC_PAGE_SIZE must be a power of 2");
-static_assert(MAX_DESC_PAGE_SIZE > 0, "MAX_DESC_PAGE_SIZE must be larger then 0");
-static_assert(is_powerof2(DEFAULT_DESC_PAGE_SIZE), "DEFAULT_DESC_PAGE_SIZE must be a power of 2");
-static_assert(DEFAULT_DESC_PAGE_SIZE > 0, "DEFAULT_DESC_PAGE_SIZE must be larger then 0");
-
-
-struct VdmaDescriptor
-{
- uint32_t PageSize_DescControl;
- uint32_t AddrL_rsvd_DataID;
- uint32_t AddrH;
- uint32_t RemainingPageSize_Status;
-};
-
-enum class VdmaInterruptsDomain
-{
- NONE = 0,
- DEVICE = 1 << 0,
- HOST = 1 << 1,
- BOTH = DEVICE | HOST
-};
-
-inline bool host_interuptes_enabled(VdmaInterruptsDomain interrupts_domain)
-{
- return 0 != (static_cast<uint32_t>(interrupts_domain) & static_cast<uint32_t>(VdmaInterruptsDomain::HOST));
-}
-
-inline bool device_interuptes_enabled(VdmaInterruptsDomain interrupts_domain)
-{
- return 0 != (static_cast<uint32_t>(interrupts_domain) & static_cast<uint32_t>(VdmaInterruptsDomain::DEVICE));
-}
-
-class VdmaDescriptorList
-{
-public:
- static Expected<VdmaDescriptorList> create(uint32_t desc_count, uint16_t requested_desc_page_size,
- HailoRTDriver &driver);
-
- ~VdmaDescriptorList();
-
- VdmaDescriptorList(const VdmaDescriptorList &other) = delete;
- VdmaDescriptorList &operator=(const VdmaDescriptorList &other) = delete;
- VdmaDescriptorList(VdmaDescriptorList &&other) noexcept;
- VdmaDescriptorList &operator=(VdmaDescriptorList &&other) = delete;
-
- uint8_t depth() const
- {
- return m_depth;
- }
-
- uint32_t count() const
- {
- return m_count;
- }
-
- uint64_t dma_address() const
- {
- return m_dma_address;
- }
-
- VdmaDescriptor& operator[](size_t i)
- {
- assert(i < m_count);
- return m_mapped_list[i];
- }
-
- uint16_t desc_page_size() const
- {
- return m_desc_page_size;
- }
-
- uintptr_t handle() const
- {
- return m_desc_handle;
- }
-
- // offset in buffer to which the first desc in this VdmaDescriptorList will point
- // offset must be a multiple of desc_page_size()
- hailo_status configure_to_use_buffer(vdma::MappedBuffer& buffer, uint8_t channel_index, size_t offset = 0);
- // On hailo8, we allow configuring buffer without specific channel index.
- hailo_status configure_to_use_buffer(vdma::MappedBuffer& buffer, size_t offset = 0);
-
- Expected<uint16_t> program_descriptors(size_t transfer_size, VdmaInterruptsDomain first_desc_interrupts_domain,
- VdmaInterruptsDomain last_desc_interrupts_domain, size_t desc_offset, bool is_circular);
- hailo_status reprogram_descriptor_interrupts_domain(size_t desc_index, VdmaInterruptsDomain interrupts_domain);
-
- uint32_t descriptors_in_buffer(size_t buffer_size) const;
- static uint32_t descriptors_in_buffer(size_t buffer_size, uint16_t desc_page_size);
- static uint32_t calculate_descriptors_count(uint32_t buffer_size, uint16_t batch_size, uint16_t desc_page_size);
- static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_single_transfer(const HailoRTDriver &driver,
- uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size);
- static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_multiple_transfers(const HailoRTDriver &driver,
- uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes);
-
-private:
- VdmaDescriptorList(uint32_t desc_count, HailoRTDriver &driver, uint16_t desc_page_size, hailo_status &status);
- uint32_t get_interrupts_bitmask(VdmaInterruptsDomain interrupts_domain);
- void program_single_descriptor(VdmaDescriptor &descriptor, uint16_t page_size,
- VdmaInterruptsDomain interrupts_domain);
- void reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor, VdmaInterruptsDomain interrupts_domain);
- static Expected<uint8_t> calculate_desc_list_depth(size_t count);
- // Note: initial_desc_page_size should be the optimal descriptor page size.
- static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_single_transfer_impl(
- const HailoRTDriver &driver, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size,
- uint16_t initial_desc_page_size);
- static Expected<std::pair<uint16_t, uint32_t>> get_desc_buffer_sizes_for_multiple_transfers_impl(
- const HailoRTDriver &driver, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes,
- uint16_t initial_desc_page_size);
-
- MmapBuffer<VdmaDescriptor> m_mapped_list;
- uint32_t m_count;
- uint8_t m_depth;
- uintptr_t m_desc_handle;
- uint64_t m_dma_address;
- HailoRTDriver &m_driver;
- const uint16_t m_desc_page_size;
-};
-
-} /* namespace hailort */
-
-#endif //_HAILO_VDMA_DESCRIPTOR_LIST_HPP_
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_device.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "vdma_device.hpp"
-#include "vdma_descriptor_list.hpp"
-#include "context_switch/multi_context/vdma_config_manager.hpp"
-#include "pcie_device.hpp"
-#include "core_device.hpp"
-#include "control.hpp"
-#include "context_switch/resource_manager_builder.hpp"
-
-#include <new>
-#include <algorithm>
-
-namespace hailort
-{
-
-#ifndef HAILO_EMULATOR
-static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(1000);
-#else /* ifndef HAILO_EMULATOR */
-static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(50000);
-#endif /* ifndef HAILO_EMULATOR */
-
-VdmaDevice::VdmaDevice(HailoRTDriver &&driver, Device::Type type, const std::string &device_id) :
- DeviceBase::DeviceBase(type),
- m_driver(std::move(driver)), m_is_configured(false)
-{
- activate_notifications(device_id);
-}
-
-Expected<std::unique_ptr<VdmaDevice>> VdmaDevice::create(const std::string &device_id)
-{
- const bool DONT_LOG_ON_FAILURE = false;
- if (CoreDevice::DEVICE_ID == device_id) {
- auto device = CoreDevice::create();
- CHECK_EXPECTED(device);;
- return std::unique_ptr<VdmaDevice>(device.release());
- }
- else if (auto pcie_info = PcieDevice::parse_pcie_device_info(device_id, DONT_LOG_ON_FAILURE)) {
- auto device = PcieDevice::create(pcie_info.release());
- CHECK_EXPECTED(device);
- return std::unique_ptr<VdmaDevice>(device.release());
- }
- else {
- LOGGER__ERROR("Invalid device id {}", device_id);
- return make_unexpected(HAILO_INVALID_ARGUMENT);
- }
-}
-
-hailo_status VdmaDevice::wait_for_wakeup()
-{
- return HAILO_SUCCESS;
-}
-
-Expected<D2H_EVENT_MESSAGE_t> VdmaDevice::read_notification()
-{
- auto notification_buffer = m_driver.read_notification();
- if (!notification_buffer.has_value()) {
- return make_unexpected(notification_buffer.status());
- }
-
- D2H_EVENT_MESSAGE_t notification;
- CHECK_AS_EXPECTED(sizeof(notification) >= notification_buffer->size(), HAILO_GET_D2H_EVENT_MESSAGE_FAIL,
- "buffer len is not valid = {}", notification_buffer->size());
- memcpy(¬ification, notification_buffer->data(), notification_buffer->size());
- return notification;
-}
-
-hailo_status VdmaDevice::disable_notifications()
-{
- return m_driver.disable_notifications();
-}
-
-hailo_status VdmaDevice::fw_interact_impl(uint8_t *request_buffer, size_t request_size,
- uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id)
-{
- uint8_t request_md5[PCIE_EXPECTED_MD5_LENGTH];
- MD5_CTX ctx;
-
- MD5_Init(&ctx);
- MD5_Update(&ctx, request_buffer, request_size);
- MD5_Final(request_md5, &ctx);
-
- uint8_t response_md5[PCIE_EXPECTED_MD5_LENGTH];
- uint8_t expected_response_md5[PCIE_EXPECTED_MD5_LENGTH];
-
- auto status = m_driver.fw_control(request_buffer, request_size, request_md5,
- response_buffer, response_size, response_md5,
- DEFAULT_TIMEOUT, cpu_id);
- CHECK_SUCCESS(status, "Failed to send fw control");
-
- MD5_Init(&ctx);
- MD5_Update(&ctx, response_buffer, (*response_size));
- MD5_Final(expected_response_md5, &ctx);
-
- auto memcmp_result = memcmp(expected_response_md5, response_md5, sizeof(response_md5));
- CHECK(0 == memcmp_result, HAILO_INTERNAL_FAILURE, "MD5 validation of control response failed.");
-
- return HAILO_SUCCESS;
-}
-
-Expected<ConfiguredNetworkGroupVector> VdmaDevice::add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params)
-{
- auto status = mark_as_used();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- if (!m_is_configured) {
- // TODO: Do we need this control after fixing HRT-7519?
- // Reset context_switch state machine - it may have been in an active state if a previous VdmaDevice
- // wasn't dtor'd (due to SIGKILL for example)
- static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
- status = Control::reset_context_switch_state_machine(*this, REMOVE_NN_CONFIG_DURING_RESET);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = Control::clear_configured_apps(*this);
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed to clear configured network groups with status {}", status);
-
- m_is_configured = true;
- }
-
- auto device_arch = get_architecture();
- CHECK_EXPECTED(device_arch);
-
- auto partial_clusters_layout_bitmap_exp = Control::get_partial_clusters_layout_bitmap(*this);
- CHECK_EXPECTED(partial_clusters_layout_bitmap_exp);
- auto partial_clusters_layout_bitmap = partial_clusters_layout_bitmap_exp.release();
-
- auto &hef_net_groups = hef.pimpl->network_groups();
- ConfiguredNetworkGroupVector added_network_groups;
- // TODO: can be optimized (add another loop the allocate the network group we're adding)
- added_network_groups.reserve(hef_net_groups.size());
- auto configure_params_copy = configure_params;
- for (const auto &hef_net_group : hef_net_groups) {
- const std::string &network_group_name = HefUtils::get_network_group_name(*hef_net_group, SupportedFeatures());
- auto hef_core_ops = hef.pimpl->core_ops(network_group_name);
- assert(hef_core_ops.size() == 1);
- std::vector<std::shared_ptr<NetworkGroupMetadata>> network_group_metadata_ptrs;
- network_group_metadata_ptrs.reserve(hef_core_ops.size());
- const auto prev_network_group_count = m_network_groups.size();
- const auto total_network_group_count = prev_network_group_count + hef_core_ops.size();
- CHECK_AS_EXPECTED(CONTROL_PROTOCOL__MAX_CONTEXT_SWITCH_APPLICATIONS >= total_network_group_count,
- HAILO_INVALID_OPERATION,
- "Can't add {} network groups from HEF. Currently {} network groups are configured; maximum allowed network groups: {}.",
- hef_core_ops.size(), prev_network_group_count, CONTROL_PROTOCOL__MAX_CONTEXT_SWITCH_APPLICATIONS);
-
- auto hef_arch = hef.pimpl->get_device_arch();
-
- auto current_net_group_index = static_cast<uint8_t>(prev_network_group_count);
- for (const auto &core_op : hef_core_ops) {
- auto expected_partial_core_op = Hef::Impl::get_core_op_per_arch(core_op, hef_arch, device_arch.value(),
- partial_clusters_layout_bitmap);
- CHECK_EXPECTED(expected_partial_core_op);
- auto partial_core_op = expected_partial_core_op.release();
- status = Hef::Impl::validate_core_op_unique_layer_names(*partial_core_op);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- // TODO: keep metadata per core_op (HRT-8639)
- // TODO: decide about core_op names - align with the Compiler
- auto network_group_metadata = hef.pimpl->get_network_group_metadata(network_group_name, partial_clusters_layout_bitmap);
- CHECK_EXPECTED(network_group_metadata);
-
- auto network_group_metadata_ptr = make_shared_nothrow<NetworkGroupMetadata>(network_group_metadata.release());
- CHECK_AS_EXPECTED(nullptr != network_group_metadata_ptr, HAILO_OUT_OF_HOST_MEMORY);
- network_group_metadata_ptrs.push_back(network_group_metadata_ptr);
- }
-
- /* If NG params are present, use them
- If no configure params are given, use default*/
- ConfigureNetworkParams config_params{};
- if (contains(configure_params, network_group_name)) {
- config_params = configure_params_copy.at(network_group_name);
- configure_params_copy.erase(network_group_name);
- } else if (configure_params.empty()) {
- auto stream_interface = get_default_streams_interface();
- CHECK_EXPECTED(stream_interface);
- auto config_params_exp = hef.create_configure_params(stream_interface.value(), network_group_name);
- CHECK_EXPECTED(config_params_exp);
- config_params = config_params_exp.release();
- } else {
- continue;
- }
- /* Validate batch size (network group batch size vs network batch size) */
- status = Hef::Impl::update_network_batch_size(config_params);
- CHECK_SUCCESS_AS_EXPECTED(status);
- auto network_group = create_configured_network_group(network_group_metadata_ptrs,
- hef, config_params, current_net_group_index);
- CHECK_EXPECTED(network_group);
- added_network_groups.emplace_back(network_group.release());
- current_net_group_index++;
- }
- std::string unmatched_keys = "";
- for (const auto &pair : configure_params_copy) {
- unmatched_keys.append(" ");
- unmatched_keys.append(pair.first);
- }
- CHECK_AS_EXPECTED(unmatched_keys.size() == 0, HAILO_INVALID_ARGUMENT,
- "Some network group names in the configuration are not found in the hef file:{}", unmatched_keys);
-
- return added_network_groups;
-}
-
-Expected<std::shared_ptr<ConfiguredNetworkGroup>> VdmaDevice::create_configured_network_group(
- const std::vector<std::shared_ptr<NetworkGroupMetadata>> &network_group_metadatas,
- Hef &hef, const ConfigureNetworkParams &config_params,
- uint8_t network_group_index)
-{
- // TODO: keep metadata per core_op (HRT-8639)
- assert(network_group_metadatas.size() == 1);
- auto network_group_metadata = network_group_metadatas[0];
-
- /* build HEF supported features */
- auto resource_manager = ResourcesManagerBuilder::build(network_group_index,
- *this, get_driver(), config_params, network_group_metadata, hef.pimpl->get_device_arch());
- CHECK_EXPECTED(resource_manager);
-
- auto net_flow_ops = hef.pimpl->post_process_ops(network_group_metadata->network_group_name());
-
- auto net_group = VdmaConfigNetworkGroup::create(m_active_net_group_holder, config_params,
- resource_manager.release(), hef.hash(), network_group_metadata, std::move(net_flow_ops));
-
- auto net_group_ptr = make_shared_nothrow<VdmaConfigNetworkGroup>(net_group.release());
- CHECK_AS_EXPECTED(nullptr != net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- // TODO: move this func into VdmaConfigNetworkGroup c'tor
- auto status = net_group_ptr->create_streams_from_config_params(*this);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- m_network_groups.emplace_back(net_group_ptr);
-
- // Check that all boundary streams were created
- status = hef.pimpl->validate_boundary_streams_were_created(network_group_metadata->network_group_name(), *net_group_ptr);
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return Expected<std::shared_ptr<ConfiguredNetworkGroup>>(net_group_ptr);
-}
-
-Expected<size_t> VdmaDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
-{
- size_t read_bytes = 0;
- hailo_status status = HAILO_UNINITIALIZED;
- status = m_driver.read_log(buffer.data(), buffer.size(), &read_bytes, cpu_id);
- CHECK_SUCCESS_AS_EXPECTED(status);
- return read_bytes;
-}
-
-void VdmaDevice::increment_control_sequence()
-{
- // To support multiprocess the sequence must remain 0 which is a number the FW ignores.
- // Otherwise the FW might get the same sequence number from several processes which
- // cause the command to be discarded.
- m_control_sequence = 0;
-}
-
-hailo_reset_device_mode_t VdmaDevice::get_default_reset_mode()
-{
- return HAILO_RESET_DEVICE_MODE_SOFT;
-}
-
-uint16_t VdmaDevice::get_default_desc_page_size() const
-{
- return m_driver.calc_desc_page_size(DEFAULT_DESC_PAGE_SIZE);
-}
-
-hailo_status VdmaDevice::mark_as_used()
-{
- return m_driver.mark_as_used();
-}
-
-VdmaDevice::~VdmaDevice()
-{
- auto status = stop_notification_fetch_thread();
- if (HAILO_SUCCESS != status) {
- LOGGER__WARNING("Stopping notification thread ungracefully");
- }
- if (m_is_configured) {
- status = Control::clear_configured_apps(*this);
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to clear conigured network groups with status {}", status);
- }
- }
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_device.hpp
- * @brief Base class for devices that uses vdma and comunicate using HailoRTDriver
- *
- **/
-
-#ifndef HAILO_VDMA_DEVICE_H_
-#define HAILO_VDMA_DEVICE_H_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "device_internal.hpp"
-#include "context_switch/network_group_internal.hpp"
-#include "os/hailort_driver.hpp"
-
-namespace hailort
-{
-
-class VdmaDevice : public DeviceBase {
-public:
- static Expected<std::unique_ptr<VdmaDevice>> create(const std::string &device_id);
-
- virtual ~VdmaDevice();
-
- virtual hailo_status wait_for_wakeup() override;
- virtual void increment_control_sequence() override;
- virtual hailo_reset_device_mode_t get_default_reset_mode() override;
- uint16_t get_default_desc_page_size() const;
-
- hailo_status mark_as_used();
- virtual Expected<size_t> read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id) override;
-
- HailoRTDriver &get_driver() {
- return std::ref(m_driver);
- };
-
-protected:
- VdmaDevice(HailoRTDriver &&driver, Type type, const std::string &device_id);
-
- virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() override;
- virtual hailo_status disable_notifications() override;
- virtual hailo_status fw_interact_impl(uint8_t *request_buffer, size_t request_size,
- uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id) override;
- virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) override;
-
- HailoRTDriver m_driver;
- std::vector<std::shared_ptr<VdmaConfigNetworkGroup>> m_network_groups;
- ActiveNetGroupHolder m_active_net_group_holder;
- bool m_is_configured;
-
-private:
- Expected<std::shared_ptr<ConfiguredNetworkGroup>> create_configured_network_group(
- const std::vector<std::shared_ptr<NetworkGroupMetadata>> &network_group_metadatas,
- Hef &hef, const ConfigureNetworkParams &config_params,
- uint8_t network_group_index);
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDMA_DEVICE_H_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_stream.cpp
- **/
-
-#include "vdma_stream.hpp"
-#include "pcie_stream.hpp"
-#include "core_stream.hpp"
-
-namespace hailort
-{
-
-Expected<std::unique_ptr<VdmaInputStream>> VdmaInputStream::create(VdmaDevice &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event)
-{
- switch (device.get_type()) {
- case Device::Type::PCIE:
- {
- auto local_stream = PcieInputStream::create(device, channel, edge_layer, batch_size,
- network_group_activated_event);
- CHECK_EXPECTED(local_stream);
- return std::unique_ptr<VdmaInputStream>(local_stream.release());
- }
- case Device::Type::CORE:
- {
- auto local_stream = CoreInputStream::create(device, channel, edge_layer, batch_size,
- network_group_activated_event);
- CHECK_EXPECTED(local_stream);
- return std::unique_ptr<VdmaInputStream>(local_stream.release());
- }
- default:
- assert(false);
- LOGGER__ERROR("Invalid device type {}", static_cast<uint8_t>(device.get_type()));
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-}
-
-VdmaInputStream::VdmaInputStream(VdmaDevice &device, std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer, EventPtr network_group_activated_event, uint16_t batch_size,
- std::chrono::milliseconds transfer_timeout,
- hailo_stream_interface_t stream_interface, hailo_status &status) :
- InputStreamBase(edge_layer, stream_interface, std::move(network_group_activated_event), status),
- m_device(&device),
- m_channel(std::move(channel)),
- is_stream_activated(false),
- m_channel_timeout(transfer_timeout),
- m_max_batch_size(batch_size),
- m_dynamic_batch_size(batch_size)
-{
- // Checking status for base class c'tor
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-VdmaInputStream::~VdmaInputStream()
-{
- auto status = HAILO_UNINITIALIZED;
- // We want to stop the vdma channel before closing the stream in the firmware
- // because sending data to a closed stream may terminate the dma engine
- if (this->is_stream_activated) {
- status = VdmaInputStream::deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
- }
- }
-}
-
-VdmaInputStream::VdmaInputStream(VdmaInputStream &&other) :
- InputStreamBase(std::move(other)),
- m_device(std::move(other.m_device)),
- m_channel(std::move(other.m_channel)),
- is_stream_activated(std::exchange(other.is_stream_activated, false)),
- m_channel_timeout(std::move(other.m_channel_timeout)),
- m_max_batch_size(other.m_max_batch_size),
- m_dynamic_batch_size(other.m_dynamic_batch_size)
-{}
-
-std::chrono::milliseconds VdmaInputStream::get_timeout() const
-{
- return this->m_channel_timeout;
-}
-
-hailo_status VdmaInputStream::set_timeout(std::chrono::milliseconds timeout)
-{
- this->m_channel_timeout = timeout;
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaInputStream::abort()
-{
- return m_channel->abort();
-}
-
-hailo_status VdmaInputStream::clear_abort()
-{
- return m_channel->clear_abort();
-}
-
-hailo_status VdmaInputStream::flush()
-{
- const auto dynamic_batch_size = (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_dynamic_batch_size) ?
- 1 : m_dynamic_batch_size;
- return m_channel->flush(m_channel_timeout * dynamic_batch_size);
-}
-
-hailo_status VdmaInputStream::activate_stream(uint16_t dynamic_batch_size)
-{
- auto status = set_dynamic_batch_size(dynamic_batch_size);
- CHECK_SUCCESS(status);
-
- status = m_channel->complete_channel_activation(0);
- CHECK_SUCCESS(status);
-
- this->is_stream_activated = true;
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaInputStream::deactivate_stream()
-{
- if (!is_stream_activated) {
- return HAILO_SUCCESS;
- }
-
- // Flush is best effort
- auto status = m_channel->flush(VDMA_FLUSH_TIMEOUT);
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Flush input_channel is not needed because channel was aborted. (channel {})", m_channel->get_channel_id());
- status = HAILO_SUCCESS;
- } else if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to flush input_channel. (status {} channel {})", status, m_channel->get_channel_id());
- }
-
- status = m_channel->stop_channel();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to stop channel with status {}", status);
- }
-
- this->is_stream_activated = false;
- return status;
-}
-
-Expected<size_t> VdmaInputStream::sync_write_raw_buffer(const MemoryView &buffer)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- status = m_channel->wait(buffer.size(), m_channel_timeout);
- if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
- return make_unexpected(status);
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
- "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = m_channel->transfer((void*)buffer.data(), buffer.size());
- if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
- return make_unexpected(status);
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
- "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return buffer.size();
-}
-
-hailo_status VdmaInputStream::write_buffer_only(const MemoryView &buffer,
- const std::function<bool()> &should_cancel)
-{
- std::unique_lock<std::mutex> lock(m_write_only_mutex);
- return m_channel->write_buffer(buffer, m_channel_timeout, should_cancel);
-}
-
-hailo_status VdmaInputStream::send_pending_buffer(size_t device_index)
-{
- std::unique_lock<std::mutex> lock(m_send_pending_mutex);
- CHECK(0 == device_index, HAILO_INVALID_OPERATION);
- hailo_status status = m_channel->wait(get_frame_size(), m_channel_timeout);
- if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
- return status;
- }
- CHECK(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
- "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
- CHECK_SUCCESS(status);
-
- return m_channel->send_pending_buffer();
-}
-
-uint16_t VdmaInputStream::get_dynamic_batch_size() const
-{
- return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
-}
-
-const char* VdmaInputStream::get_dev_id() const
-{
- return m_device->get_dev_id();
-}
-
-Expected<VdmaChannel::BufferState> VdmaInputStream::get_buffer_state()
-{
- return m_channel->get_buffer_state();
-}
-
-hailo_status VdmaInputStream::sync_channel_state()
-{
- return m_channel->sync_state(get_timeout());
-}
-
-Expected<size_t> VdmaInputStream::get_buffer_frames_size() const
-{
- return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
-}
-
-Expected<size_t> VdmaInputStream::get_pending_frames_count() const
-{
- return m_channel->get_h2d_pending_frames_count();
-}
-
-hailo_status VdmaInputStream::sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size)
-{
- ASSERT(NULL != buffer);
-
- return sync_write_raw_buffer(MemoryView(static_cast<uint8_t*>(buffer) + offset, size)).status();
-}
-
-hailo_status VdmaInputStream::set_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
- // TODO: use std::max in the configure stage
- if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
- LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
- "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
- return HAILO_SUCCESS;
- }
-
- CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
- "Dynamic batch size ({}) must be <= than the configured batch size ({})",
- dynamic_batch_size, m_max_batch_size);
-
- if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
- LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
- "Leaving previously set value of {}", m_dynamic_batch_size);
- } else {
- LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
- m_dynamic_batch_size = dynamic_batch_size;
-
- const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-/** Output stream **/
-Expected<std::unique_ptr<VdmaOutputStream>> VdmaOutputStream::create(VdmaDevice &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event)
-{
- switch (device.get_type()) {
- case Device::Type::PCIE:
- {
- auto local_stream = PcieOutputStream::create(device, channel, edge_layer, batch_size,
- network_group_activated_event);
- CHECK_EXPECTED(local_stream);
- return std::unique_ptr<VdmaOutputStream>(local_stream.release());
- }
- case Device::Type::CORE:
- {
- auto local_stream = CoreOutputStream::create(device, channel, edge_layer, batch_size,
- network_group_activated_event);
- CHECK_EXPECTED(local_stream);
- return std::unique_ptr<VdmaOutputStream>(local_stream.release());
- }
- default:
- assert(false);
- LOGGER__ERROR("Invalid device type {}", static_cast<uint8_t>(device.get_type()));
- return make_unexpected(HAILO_INTERNAL_FAILURE);
- }
-}
-
-VdmaOutputStream::VdmaOutputStream(VdmaDevice &device, std::shared_ptr<VdmaChannel> channel,
- const LayerInfo &edge_layer, EventPtr network_group_activated_event, uint16_t batch_size,
- std::chrono::milliseconds transfer_timeout, hailo_status &status) :
- OutputStreamBase(edge_layer, std::move(network_group_activated_event), status),
- m_device(&device),
- m_channel(std::move(channel)),
- is_stream_activated(false),
- m_transfer_timeout(transfer_timeout),
- m_max_batch_size(batch_size),
- m_dynamic_batch_size(batch_size),
- m_transfer_size(get_transfer_size(m_stream_info))
-{
- // Check status for base class c'tor
- if (HAILO_SUCCESS != status) {
- return;
- }
-
- status = HAILO_SUCCESS;
-}
-
-VdmaOutputStream::VdmaOutputStream(VdmaOutputStream &&other) :
- OutputStreamBase(std::move(other)),
- m_device(std::move(other.m_device)),
- m_channel(std::move(other.m_channel)),
- is_stream_activated(std::exchange(other.is_stream_activated, false)),
- m_transfer_timeout(std::move(other.m_transfer_timeout)),
- m_max_batch_size(other.m_max_batch_size),
- m_dynamic_batch_size(other.m_dynamic_batch_size),
- m_transfer_size(other.m_transfer_size)
-{}
-
-VdmaOutputStream::~VdmaOutputStream()
-{
- // We want to stop the vdma channel before closing the stream in the firmware
- // because sending data to a closed stream may terminate the dma engine
- auto status = HAILO_UNINITIALIZED;
-
- if (this->is_stream_activated) {
- status = VdmaOutputStream::deactivate_stream();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
- }
- }
-}
-
-hailo_status VdmaOutputStream::set_timeout(std::chrono::milliseconds timeout)
-{
- this->m_transfer_timeout = timeout;
- return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds VdmaOutputStream::get_timeout() const
-{
- return this->m_transfer_timeout;
-}
-
-hailo_status VdmaOutputStream::abort()
-{
- return m_channel->abort();
-}
-
-hailo_status VdmaOutputStream::clear_abort()
-{
- return m_channel->clear_abort();
-}
-
-uint16_t VdmaOutputStream::get_dynamic_batch_size() const
-{
- return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
-}
-
-const char* VdmaOutputStream::get_dev_id() const
-{
- return m_device->get_dev_id();
-}
-
-Expected<VdmaChannel::BufferState> VdmaOutputStream::get_buffer_state()
-{
- return m_channel->get_buffer_state();
-}
-
-hailo_status VdmaOutputStream::activate_stream(uint16_t dynamic_batch_size)
-{
- auto status = set_dynamic_batch_size(dynamic_batch_size);
- CHECK_SUCCESS(status);
-
- status = m_channel->complete_channel_activation(m_transfer_size);
- CHECK_SUCCESS(status);
-
- this->is_stream_activated = true;
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VdmaOutputStream::register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback)
-{
- return m_channel->register_for_d2h_interrupts(callback);
-}
-
-hailo_status VdmaOutputStream::deactivate_stream()
-{
- if (!is_stream_activated) {
- return HAILO_SUCCESS;
- }
-
- auto status = m_channel->stop_channel();
- if (HAILO_SUCCESS != status) {
- LOGGER__ERROR("Failed to stop channel with status {}", status);
- }
-
- this->is_stream_activated = false;
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> VdmaOutputStream::sync_read_raw_buffer(MemoryView &buffer)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- status = m_channel->wait(buffer.size(), m_transfer_timeout);
- if ((status == HAILO_STREAM_ABORTED_BY_USER) || (status == HAILO_STREAM_NOT_ACTIVATED)) {
- return make_unexpected(status);
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
- "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_transfer_timeout.count());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- status = m_channel->transfer(buffer.data(), buffer.size());
- if ((status == HAILO_STREAM_NOT_ACTIVATED) || (status == HAILO_STREAM_ABORTED_BY_USER)) {
- return make_unexpected(status);
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
- "{} (D2H) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_transfer_timeout.count());
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return buffer.size();
-}
-
-hailo_status VdmaOutputStream::read_all(MemoryView &buffer)
-{
- std::unique_lock<std::mutex> lock(m_read_mutex);
- CHECK((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
- "Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
-
- return sync_read_raw_buffer(buffer).status();
-}
-
-uint32_t VdmaOutputStream::get_transfer_size(const hailo_stream_info_t &stream_info)
-{
- // The ppu outputs one bbox per vdma buffer in the case of nms
- return (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
- stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
-}
-
-hailo_status VdmaOutputStream::set_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
- // TODO: use std::max in the configure stage
- if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
- LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
- "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
- return HAILO_SUCCESS;
- }
-
- CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
- "Dynamic batch size ({}) must be <= than the configured batch size ({})",
- dynamic_batch_size, m_max_batch_size);
-
- if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
- LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
- "Leaving previously set value of {}", m_dynamic_batch_size);
- } else {
- LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
- m_dynamic_batch_size = dynamic_batch_size;
-
- const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-Expected<size_t> VdmaOutputStream::get_buffer_frames_size() const
-{
- if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
- // In NMS, each output frame has different size depending on the number of bboxes found for each class
- // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
- // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
- LOGGER__INFO("NMS is not supported in function get_buffer_frames_size()");
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
-
- return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
-}
-
-Expected<size_t> VdmaOutputStream::get_pending_frames_count() const
-{
- if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
- // In NMS, each output frame has different size depending on the number of bboxes found for each class
- // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
- // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
- LOGGER__INFO("NMS is not supported in function get_pending_frames_count()");
- return make_unexpected(HAILO_NOT_AVAILABLE);
- }
-
- auto pending_descs_count = m_channel->get_d2h_pending_descs_count();
- CHECK_EXPECTED(pending_descs_count);
-
- auto channel_page_size = m_channel->get_page_size();
- uint32_t descs_per_frame = (0 == (m_stream_info.hw_frame_size % channel_page_size)) ? (m_stream_info.hw_frame_size / channel_page_size) :
- ((m_stream_info.hw_frame_size / channel_page_size) + 1);
-
- return static_cast<size_t>(std::floor(pending_descs_count.value() / descs_per_frame));
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_stream.hpp
- * @brief Stream object over vDMA channel
- **/
-
-#ifndef _HAILO_VDMA_STREAM_HPP_
-#define _HAILO_VDMA_STREAM_HPP_
-
-#include "stream_internal.hpp"
-#include "vdma_device.hpp"
-#include "vdma_channel.hpp"
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-namespace hailort
-{
-constexpr std::chrono::seconds VDMA_FLUSH_TIMEOUT(10);
-
-class VdmaInputStream : public InputStreamBase {
-public:
- static Expected<std::unique_ptr<VdmaInputStream>> create(VdmaDevice &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- VdmaInputStream(VdmaInputStream &&other);
- virtual ~VdmaInputStream();
-
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- virtual hailo_status flush() override;
- hailo_status write_buffer_only(const MemoryView &buffer, const std::function<bool()> &should_cancel = []() { return false; });
- hailo_status send_pending_buffer(size_t device_index = 0);
- uint16_t get_dynamic_batch_size() const;
- const char* get_dev_id() const;
- Expected<VdmaChannel::BufferState> get_buffer_state();
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
-
- // To be used for debugging purposes
- hailo_status sync_channel_state();
-
- void notify_all()
- {
- return m_channel->notify_all();
- }
-
-protected:
- VdmaInputStream(VdmaDevice &device, std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer,
- EventPtr network_group_activated_event, uint16_t batch_size,
- std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
- hailo_status &status);
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual Expected<size_t> sync_write_raw_buffer(const MemoryView &buffer) override;
- virtual hailo_status sync_write_all_raw_buffer_no_transform_impl(void *buffer, size_t offset, size_t size) override;
-
- VdmaDevice *m_device;
- std::shared_ptr<VdmaChannel> m_channel;
-
-private:
- hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
-
- bool is_stream_activated;
- std::chrono::milliseconds m_channel_timeout;
- const uint16_t m_max_batch_size;
- uint16_t m_dynamic_batch_size;
- std::mutex m_write_only_mutex;
- std::mutex m_send_pending_mutex;
-
- friend class InputVDeviceBaseStream;
- friend class InputVDeviceNativeStream;
-};
-
-class VdmaOutputStream : public OutputStreamBase {
-public:
- static Expected<std::unique_ptr<VdmaOutputStream>> create(VdmaDevice &device,
- std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer, uint16_t batch_size,
- EventPtr network_group_activated_event);
-
- VdmaOutputStream(VdmaOutputStream &&other);
- virtual ~VdmaOutputStream();
-
- virtual std::chrono::milliseconds get_timeout() const override;
- virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
- virtual hailo_status abort() override;
- virtual hailo_status clear_abort() override;
- uint16_t get_dynamic_batch_size() const;
- const char* get_dev_id() const;
- Expected<VdmaChannel::BufferState> get_buffer_state();
- virtual Expected<size_t> get_buffer_frames_size() const override;
- virtual Expected<size_t> get_pending_frames_count() const override;
-
- virtual hailo_status register_for_d2h_interrupts(const std::function<void(uint32_t)> &callback);
-
-protected:
- VdmaOutputStream(VdmaDevice &device, std::shared_ptr<VdmaChannel> channel, const LayerInfo &edge_layer,
- EventPtr network_group_activated_event, uint16_t batch_size,
- std::chrono::milliseconds transfer_timeout, hailo_status &status);
-
- virtual hailo_status activate_stream(uint16_t dynamic_batch_size) override;
- virtual hailo_status deactivate_stream() override;
- virtual Expected<size_t> sync_read_raw_buffer(MemoryView &buffer);
-
- VdmaDevice *m_device;
- std::shared_ptr<VdmaChannel> m_channel;
-
-private:
- hailo_status read_all(MemoryView &buffer) override;
- static uint32_t get_transfer_size(const hailo_stream_info_t &stream_info);
- hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
-
- bool is_stream_activated;
- std::chrono::milliseconds m_transfer_timeout;
- const uint16_t m_max_batch_size;
- uint16_t m_dynamic_batch_size;
- const uint32_t m_transfer_size;
- std::mutex m_read_mutex;
-
- friend class OutputVDeviceBaseStream;
-};
-
-
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_STREAM_HPP_ */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vstream.cpp
- * @brief Implementation of the virtual stream
- **/
-
-#include "hailo/vstream.hpp"
-#include "hailort_defaults.hpp"
-#include "vstream_internal.hpp"
-#include "common/runtime_statistics_internal.hpp"
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#include "rpc/rpc_definitions.hpp"
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-#include <unordered_set>
-
-namespace hailort
-{
-
-static std::map<std::string, AccumulatorPtr> get_pipeline_accumulators_by_type(
- const std::vector<std::shared_ptr<PipelineElement>> &pipeline, AccumulatorType accumulator_type);
-
-static std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_size_accumulators(
- const std::vector<std::shared_ptr<PipelineElement>> &pipeline);
-
-Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
- hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto transform_context = InputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
- dst_quant_info);
- CHECK_EXPECTED(transform_context, "Failed Creating InputTransformContext");
-
- auto buffer_pool = BufferPool::create(transform_context.value()->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags,
- vstream_flags);
- CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto pre_infer_elem_ptr = make_shared_nothrow<PreInferElement>(transform_context.release(),
- buffer_pool.release(), name, timeout, duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != pre_infer_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", pre_infer_elem_ptr->name());
-
- return pre_infer_elem_ptr;
-}
-
-Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return PreInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, name,
- std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags,
- vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
-}
-
-PreInferElement::PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
- const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
- m_transform_context(std::move(transform_context)),
- m_pool(buffer_pool),
- m_timeout(timeout)
-{}
-
-Expected<PipelineBuffer> PreInferElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
-{
- LOGGER__ERROR("PreInferElement does not support run_pull operation");
- return make_unexpected(HAILO_INVALID_OPERATION);
-}
-
-std::vector<AccumulatorPtr> PreInferElement::get_queue_size_accumulators()
-{
- if (nullptr == m_pool->get_queue_size_accumulator()) {
- return std::vector<AccumulatorPtr>();
- }
- return {m_pool->get_queue_size_accumulator()};
-}
-
-PipelinePad &PreInferElement::next_pad()
-{
- // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
- return *m_sources[0].next();
-}
-
-std::string PreInferElement::description() const
-{
- std::stringstream element_description;
- element_description << "(" << this->name() << " | " << m_transform_context->description() << ")";
- return element_description.str();
-}
-
-Expected<PipelineBuffer> PreInferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
-{
- if (PipelineBuffer::Type::FLUSH == input.get_type()) {
- return std::move(input);
- }
-
- auto transformed_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == transformed_buffer.status()) {
- return make_unexpected(transformed_buffer.status());
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != transformed_buffer.status(), HAILO_TIMEOUT,
- "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
- CHECK_EXPECTED(transformed_buffer);
-
- auto dst = transformed_buffer->as_view();
- m_duration_collector.start_measurement();
- const auto status = m_transform_context->transform(input.as_view(), dst);
- m_duration_collector.complete_measurement();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- // Note: The latency to be measured starts as the input buffer is sent to the InputVStream (via write())
- transformed_buffer->set_metadata(input.get_metadata());
-
- return transformed_buffer.release();
-}
-
-Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
- const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
- hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto transform_context = OutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
- dst_quant_info, nms_info);
- CHECK_EXPECTED(transform_context, "Failed Creating OutputTransformContext");
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto post_infer_elem_ptr = make_shared_nothrow<PostInferElement>(transform_context.release(),
- name, duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != post_infer_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", post_infer_elem_ptr->name());
-
- return post_infer_elem_ptr;
-}
-
-Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
- const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return PostInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, nms_info,
- name, vstream_params.pipeline_elements_stats_flags, pipeline_status);
-}
-
-PostInferElement::PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
- DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
- m_transform_context(std::move(transform_context))
-{}
-
-hailo_status PostInferElement::run_push(PipelineBuffer &&/*buffer*/)
-{
- LOGGER__ERROR("PostInferElement does not support run_push operation");
- return HAILO_INVALID_OPERATION;
-}
-
-PipelinePad &PostInferElement::next_pad()
-{
- // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
- return *m_sinks[0].prev();
-}
-
-std::string PostInferElement::description() const
-{
- std::stringstream element_description;
- element_description << "(" << this->name() << " | " << m_transform_context->description() << ")";
- return element_description.str();
-}
-
-Expected<PipelineBuffer> PostInferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
-{
- CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be valid in {}!", name());
-
- // Note: The latency to be measured starts as the buffer is read from the HW (it's 'input' in this case)
- optional.set_metadata(input.get_metadata());
-
- auto dst = optional.as_view();
- m_duration_collector.start_measurement();
- const auto status = m_transform_context->transform(input.as_view(), dst);
- m_duration_collector.complete_measurement();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return std::move(optional);
-}
-
-static hailo_nms_info_t fuse_nms_info(const std::vector<hailo_nms_info_t> &nms_infos)
-{
- hailo_nms_info_t fused_info = nms_infos[0];
- fused_info.is_defused = false;
- fused_info.number_of_classes = 0;
- for (const auto &nms_info : nms_infos) {
- fused_info.number_of_classes += nms_info.number_of_classes;
- }
-
- return fused_info;
-}
-
-Expected<net_flow::YOLOv5PostProcessingOp> nms_element_to_op(const NetFlowYoloNmsElement &element,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos)
-{
- std::vector<std::vector<int>> anchors;
- // Each layer anchors vector is structured as {w,h} pairs.
- auto bbox_decoders = element.bbox_decoders;
- std::sort(bbox_decoders.begin(), bbox_decoders.end(), [](auto &bbox_decoder_0, auto &bbox_decoder_1) {
- return bbox_decoder_0.stream_name < bbox_decoder_1.stream_name;
- });
- for (auto &bbox_decoder : bbox_decoders) {
- std::vector<int> layer_anchors;
- layer_anchors.reserve(bbox_decoder.h.size() + bbox_decoder.w.size());
- assert(bbox_decoder.h.size() == bbox_decoder.w.size());
- for (size_t i = 0; i < bbox_decoder.h.size(); ++i) {
- layer_anchors.push_back(bbox_decoder.w[i]);
- layer_anchors.push_back(bbox_decoder.h[i]);
- }
- anchors.push_back(layer_anchors);
- }
-
- // TODO: Get it from NetFlowYoloNmsElement when adding support for these params.
- static const bool should_dequantize = true;
- static const bool should_sigmoid = false;
- return net_flow::YOLOv5PostProcessingOp::create(anchors, shapes, formats,quant_infos,
- element.image_height, element.image_width, element.nms_score_th,
- element.nms_iou_th, element.classes, should_dequantize, element.max_proposals_per_class, should_sigmoid);
-}
-
-Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(const NetFlowYoloNmsElement &element,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos, hailo_format_t output_format,
- hailo_nms_info_t nms_info, const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
- hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_host_frame_size(nms_info, output_format),
- buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
- CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto expected_nms_op = nms_element_to_op(element, shapes, formats, quant_infos);
- auto nms_elem_ptr = make_shared_nothrow<NmsPostProcessMuxElement>(expected_nms_op.release(), buffer_pool.release(),
- name, timeout, duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != nms_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", nms_elem_ptr->name());
-
- return nms_elem_ptr;
-}
-
-Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(const NetFlowYoloNmsElement &op,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos, hailo_nms_info_t nms_info, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return NmsPostProcessMuxElement::create(op, shapes, formats, quant_infos, vstream_params.user_buffer_format, nms_info, name, std::chrono::milliseconds(vstream_params.timeout_ms),
- vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event,
- pipeline_status);
-}
-
-NmsPostProcessMuxElement::NmsPostProcessMuxElement(const net_flow::YOLOv5PostProcessingOp &op, BufferPoolPtr &&pool,
- const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- BaseMuxElement(3, name, timeout, std::move(duration_collector), std::move(pipeline_status)),
- m_nms_op(op),
- m_pool(std::move(pool))
-{}
-
-std::vector<AccumulatorPtr> NmsPostProcessMuxElement::get_queue_size_accumulators()
-{
- if (nullptr == m_pool->get_queue_size_accumulator()) {
- return std::vector<AccumulatorPtr>();
- }
- return {m_pool->get_queue_size_accumulator()};
-}
-
-Expected<PipelineBuffer> NmsPostProcessMuxElement::action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional)
-{
- std::vector<MemoryView> input_views;
-
- input_views.reserve(inputs.size());
- for (auto &input_buf : inputs) {
- input_views.push_back(input_buf.as_view());
- }
-
- auto acquired_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
- return make_unexpected(acquired_buffer.status());
- }
- CHECK_EXPECTED(acquired_buffer);
- m_duration_collector.start_measurement();
- auto post_process_result = m_nms_op.execute(input_views, acquired_buffer.value().as_view());
- m_duration_collector.complete_measurement();
- CHECK_SUCCESS_AS_EXPECTED(post_process_result);
- return acquired_buffer;
-}
-
-Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
- hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- const auto &fused_info = fuse_nms_info(nms_infos);
- auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_hw_frame_size(fused_info),
- buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
- CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto nms_elem_ptr = make_shared_nothrow<NmsMuxElement>(nms_infos, fused_info, buffer_pool.release(),
- name, timeout, duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != nms_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", nms_elem_ptr->name());
-
- return nms_elem_ptr;
-}
-
-Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- return NmsMuxElement::create(nms_infos, name, std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size,
- vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
-}
-
-NmsMuxElement::NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool,
- const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- BaseMuxElement(nms_infos.size(), name, timeout, std::move(duration_collector), std::move(pipeline_status)),
- m_nms_infos(nms_infos),
- m_fused_nms_info(fused_nms_info),
- m_pool(std::move(pool))
-{}
-
-const hailo_nms_info_t &NmsMuxElement::get_fused_nms_info() const
-{
- return m_fused_nms_info;
-}
-
-std::vector<AccumulatorPtr> NmsMuxElement::get_queue_size_accumulators()
-{
- if (nullptr == m_pool->get_queue_size_accumulator()) {
- return std::vector<AccumulatorPtr>();
- }
- return {m_pool->get_queue_size_accumulator()};
-}
-
-Expected<PipelineBuffer> NmsMuxElement::action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional)
-{
- std::vector<MemoryView> input_views;
-
- input_views.reserve(inputs.size());
- for (auto &input_buf : inputs) {
- input_views.push_back(input_buf.as_view());
- }
-
- auto acquired_buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
- return make_unexpected(acquired_buffer.status());
- }
- CHECK_AS_EXPECTED(HAILO_TIMEOUT != acquired_buffer.status(), HAILO_TIMEOUT,
- "{} failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_timeout.count());
- CHECK_EXPECTED(acquired_buffer);
-
- m_duration_collector.start_measurement();
- const auto status = fuse_buffers(input_views, m_nms_infos, acquired_buffer.value().as_view());
- m_duration_collector.complete_measurement();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return acquired_buffer.release();
-}
-
-Expected<std::shared_ptr<TransformDemuxElement>> TransformDemuxElement::create(std::shared_ptr<OutputDemuxer> demuxer,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
- hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- std::vector<BufferPoolPtr> pools;
- pools.reserve(demuxer->get_edges_stream_info().size());
-
- for (const auto& mux_edge : demuxer->get_edges_stream_info()) {
- auto buffer_pool = BufferPool::create(mux_edge.hw_frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
- CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
- pools.push_back(buffer_pool.release());
- }
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto demux_elem_ptr = make_shared_nothrow<TransformDemuxElement>(demuxer, std::move(pools), name, timeout,
- duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != demux_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- return demux_elem_ptr;
-}
-
-TransformDemuxElement::TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools,
- const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
- BaseDemuxElement(demuxer->get_edges_stream_info().size(), name, timeout, std::move(duration_collector),
- std::move(pipeline_status)),
- m_demuxer(demuxer),
- m_pools(std::move(pools))
-{}
-
-std::vector<AccumulatorPtr> TransformDemuxElement::get_queue_size_accumulators()
-{
- std::vector<AccumulatorPtr> result;
- for (const auto& pool : m_pools) {
- if (nullptr != pool->get_queue_size_accumulator()) {
- result.emplace_back(pool->get_queue_size_accumulator());
- }
- }
- return result;
-}
-
-Expected<std::vector<PipelineBuffer>> TransformDemuxElement::action(PipelineBuffer &&input)
-{
- std::vector<PipelineBuffer> outputs;
- std::vector<MemoryView> raw_buffers;
-
- auto mux_edges = m_demuxer->get_edges_stream_info();
- outputs.reserve(mux_edges.size());
- raw_buffers.reserve(mux_edges.size());
-
- for (uint32_t i = 0; i < mux_edges.size(); i++) {
- auto acquired_buffer = m_pools[i]->acquire_buffer(m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
- return make_unexpected(acquired_buffer.status());
- }
- CHECK_EXPECTED(acquired_buffer, "Failed to acquire buffer");
- outputs.emplace_back(acquired_buffer.release());
-
- raw_buffers.push_back(outputs.back().as_view());
- }
-
- m_duration_collector.start_measurement();
- const auto status = m_demuxer->transform_demux(input.as_view(), raw_buffers);
- m_duration_collector.complete_measurement();
- CHECK_SUCCESS_AS_EXPECTED(status);
-
- return outputs;
-}
-
-BaseVStream::BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&network_group_activated_event,
- hailo_status &output_status) :
- m_vstream_info(vstream_info),
- m_vstream_params(vstream_params),
- m_measure_pipeline_latency((vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0),
- m_entry_element(pipeline_entry),
- m_pipeline(std::move(pipeline)),
- m_is_activated(false),
- m_is_aborted(false),
- m_pipeline_status(std::move(pipeline_status)),
- m_shutdown_event(shutdown_event),
- m_network_group_activated_event(std::move(network_group_activated_event)),
- m_fps_accumulators(get_pipeline_accumulators_by_type(m_pipeline, AccumulatorType::FPS)),
- m_latency_accumulators(get_pipeline_accumulators_by_type(m_pipeline, AccumulatorType::LATENCY)),
- m_queue_size_accumulators(get_pipeline_queue_size_accumulators(m_pipeline)),
- m_pipeline_latency_accumulator(pipeline_latency_accumulator)
-{
- output_status = start_vstream();
-}
-
-BaseVStream::BaseVStream(BaseVStream &&other) noexcept :
- m_vstream_info(std::move(other.m_vstream_info)),
- m_vstream_params(std::move(other.m_vstream_params)),
- m_measure_pipeline_latency(std::move(other.m_measure_pipeline_latency)),
- m_entry_element(std::move(other.m_entry_element)),
- m_pipeline(std::move(other.m_pipeline)),
- m_is_activated(std::exchange(other.m_is_activated, false)),
- m_is_aborted(std::exchange(other.m_is_aborted, false)),
- m_pipeline_status(std::move(other.m_pipeline_status)),
- m_shutdown_event(std::move(other.m_shutdown_event)),
- m_network_group_activated_event(std::move(other.m_network_group_activated_event)),
- m_fps_accumulators(std::move(other.m_fps_accumulators)),
- m_latency_accumulators(std::move(other.m_latency_accumulators)),
- m_queue_size_accumulators(std::move(other.m_queue_size_accumulators)),
- m_pipeline_latency_accumulator(std::move(other.m_pipeline_latency_accumulator))
-{}
-
-BaseVStream& BaseVStream::operator=(BaseVStream &&other) noexcept
-{
- if (this != &other) {
- // operator= is used only for vstream creation BEFORE activation. otherwise we should deactivate vstream here
- assert(!m_is_activated);
- m_vstream_info = std::move(other.m_vstream_info);
- m_vstream_params = std::move(other.m_vstream_params);
- m_measure_pipeline_latency = std::move(other.m_measure_pipeline_latency);
- m_entry_element = std::move(other.m_entry_element);
- m_pipeline = std::move(other.m_pipeline);
- m_is_activated = std::exchange(other.m_is_activated, false);
- m_is_aborted = std::exchange(other.m_is_aborted, false);
- m_pipeline_status = std::move(other.m_pipeline_status);
- m_shutdown_event = std::move(other.m_shutdown_event);
- m_network_group_activated_event = std::move(other.m_network_group_activated_event);
- m_fps_accumulators = std::move(other.m_fps_accumulators);
- m_latency_accumulators = std::move(other.m_latency_accumulators);
- m_queue_size_accumulators = std::move(other.m_queue_size_accumulators);
- m_pipeline_latency_accumulator = std::move(other.m_pipeline_latency_accumulator);
- }
- return *this;
-}
-
-hailo_status BaseVStream::start_vstream()
-{
- auto status = m_shutdown_event->reset();
- CHECK_SUCCESS(status);
-
- LOGGER__DEBUG("Activating {}...", name());
- status = m_entry_element->activate();
- CHECK_SUCCESS(status);
-
- status = resume();
- CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
- "Failed to resume stream in {}", name());
-
- m_is_activated = true;
- return HAILO_SUCCESS;
-}
-
-hailo_status BaseVStream::abort()
-{
- m_is_aborted = true;
- return m_entry_element->abort();
-}
-
-hailo_status BaseVStream::resume()
-{
- m_is_aborted = false;
- return m_entry_element->resume();
-}
-
-hailo_status BaseVStream::stop_vstream()
-{
- hailo_status status = HAILO_SUCCESS;
- if (m_is_activated) {
- m_is_activated = false;
- status = m_entry_element->deactivate();
- if (HAILO_SUCCESS != status) {
- LOGGER__WARNING("Failed deactivate of vstream {} status {}", name(), status);
- }
-
- status = m_entry_element->post_deactivate();
- if (HAILO_SUCCESS != status) {
- LOGGER__WARNING("Failed post deactivate of vstream {} status {}", name(), status);
- }
- }
- return status;
-}
-
-hailo_status BaseVStream::stop_and_clear()
-{
- auto status = m_network_group_activated_event->wait(std::chrono::milliseconds(0));
- CHECK(HAILO_TIMEOUT == status, HAILO_INVALID_OPERATION,
- "Trying to clear {} vstream before its network group is deactivated", name());
-
- status = stop_vstream();
- CHECK_SUCCESS(status);
-
- status = m_entry_element->clear();
- CHECK_SUCCESS(status, "Failed clearing vstream {}", name());
-
- const auto curr_pipeline_status = m_pipeline_status->load();
- if (HAILO_SUCCESS != curr_pipeline_status) {
- LOGGER__TRACE("Overwritting current pipeline status {}", curr_pipeline_status);
- m_pipeline_status->store(HAILO_SUCCESS);
- }
-
- return HAILO_SUCCESS;
-}
-
-size_t BaseVStream::get_frame_size() const
-{
- if (HAILO_FORMAT_ORDER_HAILO_NMS == m_vstream_info.format.order) {
- return HailoRTCommon::get_nms_host_frame_size(m_vstream_info.nms_shape, m_vstream_params.user_buffer_format);
- }
- return HailoRTCommon::get_frame_size(m_vstream_info.shape, m_vstream_params.user_buffer_format);
-}
-
-const hailo_vstream_info_t &BaseVStream::get_info() const
-{
- return m_vstream_info;
-}
-
-const hailo_format_t &BaseVStream::get_user_buffer_format() const
-{
- return m_vstream_params.user_buffer_format;
-}
-
-std::string BaseVStream::name() const
-{
- return std::string(m_vstream_info.name);
-}
-
-std::string BaseVStream::network_name() const
-{
- return std::string(m_vstream_info.network_name);
-}
-
-const std::map<std::string, AccumulatorPtr> &BaseVStream::get_fps_accumulators() const
-{
- return m_fps_accumulators;
-}
-
-const std::map<std::string, AccumulatorPtr> &BaseVStream::get_latency_accumulators() const
-{
- return m_latency_accumulators;
-}
-
-const std::map<std::string, std::vector<AccumulatorPtr>> &BaseVStream::get_queue_size_accumulators() const
-{
- return m_queue_size_accumulators;
-}
-
-AccumulatorPtr BaseVStream::get_pipeline_latency_accumulator() const
-{
- return m_pipeline_latency_accumulator;
-}
-
-
-const std::vector<std::shared_ptr<PipelineElement>> &BaseVStream::get_pipeline() const
-{
- return m_pipeline;
-}
-
-Expected<InputVStream> InputVStream::create(const hailo_vstream_info_t &vstream_info,
- const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
- std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
- AccumulatorPtr pipeline_latency_accumulator)
-{
- auto vstream_internal = InputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
- std::move(pipeline), std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator);
- CHECK_EXPECTED(vstream_internal);
-
- InputVStream vstream(vstream_internal.release());
- return vstream;
-}
-
-hailo_status InputVStream::write(const MemoryView &buffer)
-{
- return m_vstream->write(std::move(buffer));
-}
-
-hailo_status InputVStream::flush()
-{
- return m_vstream->flush();
-}
-
-hailo_status InputVStream::clear(std::vector<InputVStream> &vstreams)
-{
- for (auto &vstream : vstreams) {
- auto status = vstream.stop_and_clear();
- CHECK_SUCCESS(status);
- }
- for (auto &vstream : vstreams) {
- auto status = vstream.start_vstream();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status InputVStream::clear(std::vector<std::reference_wrapper<InputVStream>> &vstreams)
-{
- for (auto &vstream : vstreams) {
- auto status = vstream.get().stop_and_clear();
- CHECK_SUCCESS(status);
- }
- for (auto &vstream : vstreams) {
- auto status = vstream.get().start_vstream();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status InputVStream::abort()
-{
- return m_vstream->abort();
-}
-
-hailo_status InputVStream::resume()
-{
- return m_vstream->resume();
-}
-
-size_t InputVStream::get_frame_size() const
-{
- return m_vstream->get_frame_size();
-}
-
-const hailo_vstream_info_t &InputVStream::get_info() const
-{
- return m_vstream->get_info();
-}
-
-const hailo_format_t &InputVStream::get_user_buffer_format() const
-{
- return m_vstream->get_user_buffer_format();
-}
-
-std::string InputVStream::name() const
-{
- return m_vstream->name();
-}
-
-std::string InputVStream::network_name() const
-{
- return m_vstream->network_name();
-}
-
-const std::map<std::string, AccumulatorPtr> &InputVStream::get_fps_accumulators() const
-{
- return m_vstream->get_fps_accumulators();
-}
-
-const std::map<std::string, AccumulatorPtr> &InputVStream::get_latency_accumulators() const
-{
- return m_vstream->get_latency_accumulators();
-}
-
-const std::map<std::string, std::vector<AccumulatorPtr>> &InputVStream::get_queue_size_accumulators() const
-{
- return m_vstream->get_queue_size_accumulators();
-}
-
-AccumulatorPtr InputVStream::get_pipeline_latency_accumulator() const
-{
- return m_vstream->get_pipeline_latency_accumulator();
-}
-
-const std::vector<std::shared_ptr<PipelineElement>> &InputVStream::get_pipeline() const
-{
- return m_vstream->get_pipeline();
-}
-
-hailo_status InputVStream::start_vstream()
-{
- return m_vstream->start_vstream();
-}
-
-hailo_status InputVStream::stop_vstream()
-{
- return m_vstream->stop_vstream();
-}
-
-hailo_status InputVStream::stop_and_clear()
-{
- return m_vstream->stop_and_clear();
-}
-
-std::string InputVStream::get_pipeline_description() const
-{
- return m_vstream->get_pipeline_description();
-}
-
-InputVStream::InputVStream(std::shared_ptr<InputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
-
-Expected<OutputVStream> OutputVStream::create(
- const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator)
-{
- auto vstream_internal = OutputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry,
- std::move(pipeline), std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator);
- CHECK_EXPECTED(vstream_internal);
-
- OutputVStream vstream(vstream_internal.release());
- return vstream;
-}
-
-hailo_status OutputVStream::read(MemoryView buffer)
-{
- return m_vstream->read(std::move(buffer));
-}
-
-hailo_status OutputVStream::clear(std::vector<OutputVStream> &vstreams)
-{
- for (auto &vstream : vstreams) {
- auto status = vstream.stop_and_clear();
- CHECK_SUCCESS(status);
- }
- for (auto &vstream : vstreams) {
- auto status = vstream.start_vstream();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status OutputVStream::abort()
-{
- return m_vstream->abort();
-}
-
-hailo_status OutputVStream::resume()
-{
- return m_vstream->resume();
-}
-
-hailo_status OutputVStream::clear(std::vector<std::reference_wrapper<OutputVStream>> &vstreams)
-{
- for (auto &vstream : vstreams) {
- auto status = vstream.get().stop_and_clear();
- CHECK_SUCCESS(status);
- }
- for (auto &vstream : vstreams) {
- auto status = vstream.get().start_vstream();
- CHECK_SUCCESS(status);
- }
-
- return HAILO_SUCCESS;
-}
-
-size_t OutputVStream::get_frame_size() const
-{
- return m_vstream->get_frame_size();
-}
-
-const hailo_vstream_info_t &OutputVStream::get_info() const
-{
- return m_vstream->get_info();
-}
-
-const hailo_format_t &OutputVStream::get_user_buffer_format() const
-{
- return m_vstream->get_user_buffer_format();
-}
-
-std::string OutputVStream::name() const
-{
- return m_vstream->name();
-}
-
-std::string OutputVStream::network_name() const
-{
- return m_vstream->network_name();
-}
-
-const std::map<std::string, AccumulatorPtr> &OutputVStream::get_fps_accumulators() const
-{
- return m_vstream->get_fps_accumulators();
-}
-
-const std::map<std::string, AccumulatorPtr> &OutputVStream::get_latency_accumulators() const
-{
- return m_vstream->get_latency_accumulators();
-}
-
-const std::map<std::string, std::vector<AccumulatorPtr>> &OutputVStream::get_queue_size_accumulators() const
-{
- return m_vstream->get_queue_size_accumulators();
-}
-
-AccumulatorPtr OutputVStream::get_pipeline_latency_accumulator() const
-{
- return m_vstream->get_pipeline_latency_accumulator();
-}
-
-const std::vector<std::shared_ptr<PipelineElement>> &OutputVStream::get_pipeline() const
-{
- return m_vstream->get_pipeline();
-}
-
-hailo_status OutputVStream::start_vstream()
-{
- return m_vstream->start_vstream();
-}
-
-hailo_status OutputVStream::stop_vstream()
-{
- return m_vstream->stop_vstream();
-}
-
-hailo_status OutputVStream::stop_and_clear()
-{
- return m_vstream->stop_and_clear();
-}
-
-std::string OutputVStream::get_pipeline_description() const
-{
- return m_vstream->get_pipeline_description();
-}
-
-OutputVStream::OutputVStream(std::shared_ptr<OutputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
-
-std::map<std::string, AccumulatorPtr> get_pipeline_accumulators_by_type(
- const std::vector<std::shared_ptr<PipelineElement>> &pipeline, AccumulatorType accumulator_type)
-{
- std::map<std::string, AccumulatorPtr> result;
- for (const auto &elem : pipeline) {
- if (nullptr == elem) {
- continue;
- }
-
- AccumulatorPtr accumulator = nullptr;
- if (AccumulatorType::FPS == accumulator_type) {
- accumulator = elem->get_fps_accumulator();
- } else if (AccumulatorType::LATENCY == accumulator_type) {
- accumulator = elem->get_latency_accumulator();
- } else {
- continue;
- }
-
- if (nullptr != accumulator) {
- result.emplace(elem->name(), accumulator);
- }
- }
-
- return result;
-}
-
-std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_size_accumulators(
- const std::vector<std::shared_ptr<PipelineElement>> &pipeline)
-{
- std::map<std::string, std::vector<AccumulatorPtr>> result;
- for (const auto &elem : pipeline) {
- if (nullptr == elem) {
- continue;
- }
-
- const auto accumulators = elem->get_queue_size_accumulators();
- if (0 != accumulators.size()) {
- result.emplace(elem->name(), accumulators);
- }
- }
-
- return result;
-}
-
-Expected<std::shared_ptr<InputVStreamInternal>> InputVStreamInternal::create(const hailo_vstream_info_t &vstream_info,
- const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
- std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
- AccumulatorPtr pipeline_latency_accumulator)
-{
- auto vstream = InputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
- std::move(pipeline), std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator);
- CHECK_EXPECTED(vstream);
- auto vstream_ptr = std::shared_ptr<InputVStreamInternal>(vstream.release());
- return vstream_ptr;
-}
-
-InputVStreamInternal::InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&network_group_activated_event,
- hailo_status &output_status) :
- BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
- shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), output_status){}
-
-Expected<std::shared_ptr<InputVStreamImpl>> InputVStreamImpl::create(const hailo_vstream_info_t &vstream_info,
- const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
- std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
- AccumulatorPtr pipeline_latency_accumulator)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- if (nullptr != pipeline_latency_accumulator) {
- pipeline_exit->sink().set_push_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
- const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
- std::chrono::steady_clock::now() - metadata.get_start_time()).count();
- pipeline_latency_accumulator->add_data_point(duration_sec);
- });
- }
-
- auto vstream_ptr = std::shared_ptr<InputVStreamImpl>(new InputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
- std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), status));
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
-
- return vstream_ptr;
-}
-
-InputVStreamImpl::InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status) :
- InputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
- shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), output_status)
-{
- if (HAILO_SUCCESS != output_status) {
- return;
- }
- LOGGER__INFO("Creating {}...", name());
-}
-
-InputVStreamImpl::~InputVStreamImpl()
-{
- (void)stop_vstream();
- if (m_is_aborted) {
- // If VStream was aborted, do not clear low-level stream abortion,
- // otherwise flush would be called on low-level stream d-tor when there is no receiver.
- (void)abort();
- }
-}
-
-hailo_status InputVStreamImpl::write(const MemoryView &buffer)
-{
- if (nullptr != m_network_group_activated_event) {
- CHECK(m_is_activated, HAILO_VSTREAM_PIPELINE_NOT_ACTIVATED, "Failed to write buffer! Virtual stream {} is not activated!", name());
- auto status = m_network_group_activated_event->wait(std::chrono::milliseconds(0));
- CHECK(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED,
- "Trying to write to vstream {} before its network group is activated", name());
- }
-
- auto status = m_entry_element->run_push(PipelineBuffer(buffer, m_measure_pipeline_latency));
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- LOGGER__INFO("Sending to VStream was shutdown!");
- status = m_pipeline_status->load();
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Sending to VStream was aborted!");
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- return status;
-}
-
-hailo_status InputVStreamImpl::flush()
-{
- auto status = m_entry_element->run_push(PipelineBuffer(PipelineBuffer::Type::FLUSH));
- CHECK_SUCCESS(status);
-
- status = m_entry_element->flush();
- CHECK_SUCCESS(status);
-
- return HAILO_SUCCESS;
-}
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wreturn-type"
-Expected<std::shared_ptr<InputVStreamClient>> InputVStreamClient::create(uint32_t input_vstream_handle)
-{
- grpc::ChannelArguments ch_args;
- ch_args.SetMaxReceiveMessageSize(-1);
- auto channel = grpc::CreateCustomChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials(), ch_args);
- CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
-
- auto client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
-
- auto user_buffer_format = client->InputVStream_get_user_buffer_format(input_vstream_handle);
- CHECK_EXPECTED(user_buffer_format);
-
- auto vstream_info = client->InputVStream_get_info(input_vstream_handle);
- CHECK_EXPECTED(vstream_info);
-
- return std::shared_ptr<InputVStreamClient>(new InputVStreamClient(std::move(client), std::move(input_vstream_handle),
- user_buffer_format.release(), vstream_info.release()));
-}
-
-InputVStreamClient::InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format,
- hailo_vstream_info_t &&info)
- : m_client(std::move(client)), m_handle(std::move(input_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
-
-InputVStreamClient::~InputVStreamClient()
-{
- auto reply = m_client->InputVStream_release(m_handle);
- if (reply != HAILO_SUCCESS) {
- LOGGER__CRITICAL("InputVStream_release failed!");
- }
-}
-
-hailo_status InputVStreamClient::write(const MemoryView &buffer)
-{
- return m_client->InputVStream_write(m_handle, buffer);
-}
-
-hailo_status InputVStreamClient::flush()
-{
- return m_client->InputVStream_flush(m_handle);
-}
-
-hailo_status InputVStreamClient::abort()
-{
- auto channel = grpc::CreateChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials());
- CHECK(channel != nullptr, HAILO_INTERNAL_FAILURE);
- auto abort_client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- CHECK(abort_client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
- return abort_client->InputVStream_abort(m_handle);
-}
-
-hailo_status InputVStreamClient::resume()
-{
- return m_client->InputVStream_resume(m_handle);
-}
-
-size_t InputVStreamClient::get_frame_size() const
-{
- auto frame_size = m_client->InputVStream_get_frame_size(m_handle);
- if (!frame_size) {
- LOGGER__CRITICAL("InputVStream_get_frame_size failed with status={}", frame_size.status());
- return 0;
- }
- return frame_size.release();
-}
-
-const hailo_vstream_info_t &InputVStreamClient::get_info() const
-{
- return m_info;
-}
-
-const hailo_format_t &InputVStreamClient::get_user_buffer_format() const
-{
- return m_user_buffer_format;
-}
-
-std::string InputVStreamClient::name() const
-{
- auto expected_name = m_client->InputVStream_name(m_handle);
- if (!expected_name) {
- LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
- return "";
- }
- return expected_name.release();
-}
-
-std::string InputVStreamClient::network_name() const
-{
- auto expected_name = m_client->InputVStream_network_name(m_handle);
- if (!expected_name) {
- LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
- return "";
- }
- return expected_name.release();
-}
-
-const std::map<std::string, AccumulatorPtr> &InputVStreamClient::get_fps_accumulators() const
-{
- LOGGER__ERROR("InputVStream::get_fps_accumulators function is not supported when using multi-process service");
- return m_fps_accumulators;
-}
-const std::map<std::string, AccumulatorPtr> &InputVStreamClient::get_latency_accumulators() const
-{
- LOGGER__ERROR("InputVStream::get_latency_accumulators function is not supported when using multi-process service");
- return m_latency_accumulators;
-}
-
-const std::map<std::string, std::vector<AccumulatorPtr>> &InputVStreamClient::get_queue_size_accumulators() const
-{
- LOGGER__ERROR("InputVStream::get_queue_size_accumulators function is not supported when using multi-process service");
- return m_queue_size_accumulators;
-}
-AccumulatorPtr InputVStreamClient::get_pipeline_latency_accumulator() const
-{
- LOGGER__ERROR("InputVStream::get_pipeline_latency_accumulator function is not supported when using multi-process service");
- return m_pipeline_latency_accumulator;
-}
-const std::vector<std::shared_ptr<PipelineElement>> &InputVStreamClient::get_pipeline() const
-{
- LOGGER__ERROR("InputVStream::get_pipeline function is not supported when using multi-process service");
- return m_pipeline;
-}
-
-#pragma GCC diagnostic pop
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-std::string InputVStreamInternal::get_pipeline_description() const
-{
- std::stringstream pipeline_str;
- pipeline_str << "Input pipeline '" << name() << "': ";
- for (const auto &element : m_pipeline) {
- pipeline_str << element->description() << " >> ";
- }
- pipeline_str << "HW";
- return pipeline_str.str();
-}
-
-Expected<std::shared_ptr<OutputVStreamInternal>> OutputVStreamInternal::create(
- const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator)
-{
- auto vstream = OutputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry,
- std::move(pipeline), std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator);
- CHECK_EXPECTED(vstream);
- auto vstream_ptr = std::shared_ptr<OutputVStreamInternal>(vstream.release());
- return vstream_ptr;
-}
-
-OutputVStreamInternal::OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry,
- std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status) :
- BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
- shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), output_status){}
-
-Expected<std::shared_ptr<OutputVStreamImpl>> OutputVStreamImpl::create(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator)
-{
- hailo_status status = HAILO_UNINITIALIZED;
-
- CHECK_AS_EXPECTED(1 == pipeline_entry->sources().size(), HAILO_INVALID_ARGUMENT,
- "OutputVStream's entry element is expected to have one source");
-
- if (nullptr != pipeline_latency_accumulator) {
- pipeline_entry->sources()[0].set_pull_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
- const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
- std::chrono::steady_clock::now() - metadata.get_start_time()).count();
- pipeline_latency_accumulator->add_data_point(duration_sec);
- });
- }
-
- auto vstream_ptr = std::shared_ptr<OutputVStreamImpl>(new OutputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
- std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), status));
- CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
-
- return vstream_ptr;
-}
-
-std::string OutputVStreamInternal::get_pipeline_description() const
-{
- std::stringstream pipeline_str;
- pipeline_str << "Output pipeline '" << name() << "': HW";
- for (const auto &element : m_pipeline) {
- pipeline_str << " >> " << element->description();
- }
- return pipeline_str.str();
-}
-
-OutputVStreamImpl::OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry,
- std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status) :
- OutputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
- shutdown_event, pipeline_latency_accumulator, std::move(network_group_activated_event), output_status)
-{
- if (HAILO_SUCCESS != output_status) {
- return;
- }
-
- for (auto &element : m_pipeline) {
- element->set_on_cant_pull_callback([this] () {
- if (m_cant_read_callback) {
- m_cant_read_callback();
- }
- });
- element->set_on_can_pull_callback([this] () {
- if (m_can_read_callback) {
- m_can_read_callback();
- }
- });
- }
-
- LOGGER__INFO("Creating {}...", name());
-}
-
-OutputVStreamImpl::~OutputVStreamImpl()
-{
- (void)stop_vstream();
- if (m_is_aborted) {
- // If VStream was aborted, do not clear low-level stream abortion,
- // otherwise flush would be called on low-level stream d-tor when there is no receiver.
- (void)abort();
- }
-}
-
-hailo_status OutputVStreamImpl::read(MemoryView buffer)
-{
- if (nullptr != m_network_group_activated_event) {
- CHECK(m_is_activated, HAILO_VSTREAM_PIPELINE_NOT_ACTIVATED, "read() failed! Virtual stream {} is not activated!", name());
- auto status = m_network_group_activated_event->wait(std::chrono::milliseconds(0));
- if (HAILO_TIMEOUT == status) {
- LOGGER__INFO("Trying to read from vstream {} before its network_group is activated", name());
- return HAILO_NETWORK_GROUP_NOT_ACTIVATED;
- }
- CHECK_SUCCESS(status);
- }
-
- assert(1 == m_entry_element->sources().size());
- auto recv_buffer = m_entry_element->sources()[0].run_pull(PipelineBuffer(buffer, m_measure_pipeline_latency));
- auto status = recv_buffer.status();
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- LOGGER__INFO("Receiving to VStream was shutdown!");
- status = m_pipeline_status->load();
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Receiving to VStream was aborted!");
- m_entry_element->wait_for_finish();
- return HAILO_STREAM_ABORTED_BY_USER;
- }
- return status;
-}
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wreturn-type"
-Expected<std::shared_ptr<OutputVStreamClient>> OutputVStreamClient::create(uint32_t outputs_vstream_handle)
-{
- grpc::ChannelArguments ch_args;
- ch_args.SetMaxReceiveMessageSize(-1);
- auto channel = grpc::CreateCustomChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials(), ch_args);
- CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
-
- auto client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
-
- auto user_buffer_format = client->OutputVStream_get_user_buffer_format(outputs_vstream_handle);
- CHECK_EXPECTED(user_buffer_format);
-
- auto info = client->OutputVStream_get_info(outputs_vstream_handle);
- CHECK_EXPECTED(info);
-
- return std::shared_ptr<OutputVStreamClient>(new OutputVStreamClient(std::move(client), std::move(outputs_vstream_handle),
- user_buffer_format.release(), info.release()));
-}
-
-OutputVStreamClient::OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
- hailo_vstream_info_t &&info)
- : m_client(std::move(client)), m_handle(std::move(outputs_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
-
-OutputVStreamClient::~OutputVStreamClient()
-{
- auto reply = m_client->OutputVStream_release(m_handle);
- if (reply != HAILO_SUCCESS) {
- LOGGER__CRITICAL("OutputVStream_release failed!");
- }
-}
-
-hailo_status OutputVStreamClient::read(MemoryView buffer)
-{
- return m_client->OutputVStream_read(m_handle, buffer);
-}
-
-hailo_status OutputVStreamClient::abort()
-{
- auto channel = grpc::CreateChannel(HAILO_DEFAULT_UDS_ADDR, grpc::InsecureChannelCredentials());
- CHECK(channel != nullptr, HAILO_INTERNAL_FAILURE);
- auto abort_client = std::unique_ptr<HailoRtRpcClient>(new HailoRtRpcClient(channel));
- CHECK(abort_client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
- return abort_client->OutputVStream_abort(m_handle);
-}
-
-hailo_status OutputVStreamClient::resume()
-{
- return m_client->OutputVStream_resume(m_handle);
-}
-
-size_t OutputVStreamClient::get_frame_size() const
-{
- auto frame_size = m_client->OutputVStream_get_frame_size(m_handle);
- if (!frame_size) {
- LOGGER__CRITICAL("OutputVStream_get_frame_size failed with status={}", frame_size.status());
- return 0;
- }
- return frame_size.release();
-}
-
-const hailo_vstream_info_t &OutputVStreamClient::get_info() const
-{
- return m_info;
-}
-
-const hailo_format_t &OutputVStreamClient::get_user_buffer_format() const
-{
- return m_user_buffer_format;
-}
-
-std::string OutputVStreamClient::name() const
-{
- auto expected_name = m_client->OutputVStream_name(m_handle);
- if (!expected_name) {
- LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
- return "";
- }
- return expected_name.release();
-}
-
-std::string OutputVStreamClient::network_name() const
-{
- auto expected_name = m_client->OutputVStream_network_name(m_handle);
- if (!expected_name) {
- LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
- return "";
- }
- return expected_name.release();
-}
-
-const std::map<std::string, AccumulatorPtr> &OutputVStreamClient::get_fps_accumulators() const
-{
- LOGGER__ERROR("OutputVStream::get_fps_accumulators function is not supported when using multi-process service");
- return m_fps_accumulators;
-}
-const std::map<std::string, AccumulatorPtr> &OutputVStreamClient::get_latency_accumulators() const
-{
- LOGGER__ERROR("OutputVStream::get_latency_accumulators functoin is not supported when using multi-process service");
- return m_latency_accumulators;
-}
-
-const std::map<std::string, std::vector<AccumulatorPtr>> &OutputVStreamClient::get_queue_size_accumulators() const
-{
- LOGGER__ERROR("OutputVStream::get_queue_size_accumulators function is not supported when using multi-process service");
- return m_queue_size_accumulators;
-}
-AccumulatorPtr OutputVStreamClient::get_pipeline_latency_accumulator() const
-{
- LOGGER__ERROR("OutputVStream::get_pipeline_latency_accumulator function is not supported when using multi-process service");
- return m_pipeline_latency_accumulator;
-}
-const std::vector<std::shared_ptr<PipelineElement>> &OutputVStreamClient::get_pipeline() const
-{
- LOGGER__ERROR("OutputVStream::get_pipeline function is not supported when using multi-process service");
- return m_pipeline;
-}
-
-#pragma GCC diagnostic pop
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-Expected<std::shared_ptr<HwReadElement>> HwReadElement::create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
- size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> transform_context)
-{
- auto buffer_pool = BufferPool::create(stream->get_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
- CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
-
- BufferPoolPtr transform_pool = nullptr;
- if (transform_context) {
- auto expected_transform_pool = BufferPool::create(transform_context->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
- CHECK_EXPECTED(expected_transform_pool, "Failed creating BufferPool for {}", name);
- transform_pool = expected_transform_pool.release();
- }
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto hw_read_elem_ptr = make_shared_nothrow<HwReadElement>(stream, buffer_pool.release(), name, timeout,
- duration_collector.release(), shutdown_event, std::move(pipeline_status), transform_pool, std::move(transform_context));
- CHECK_AS_EXPECTED(nullptr != hw_read_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", hw_read_elem_ptr->name());
-
- return hw_read_elem_ptr;
-}
-
// Constructor: stores the stream, pools, timeout and events; the combined
// wait object (m_activation_wait_or_shutdown) wakes on either network-group
// activation or pipeline shutdown.
HwReadElement::HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name,
    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
    EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
    BufferPoolPtr transform_pool, std::unique_ptr<OutputTransformContext> transform_context) :
    SourceElement(name, std::move(duration_collector), std::move(pipeline_status)),
    m_stream(stream),
    m_pool(buffer_pool),
    m_transform_pool(transform_pool),
    m_timeout(timeout),
    m_shutdown_event(shutdown_event),
    m_activation_wait_or_shutdown(stream->get_network_group_activated_event(), shutdown_event),
    m_transform_context(std::move(transform_context))
{}
-
// Forwards the invalid-frame counter maintained by the underlying stream
// (incremented in run_pull() when a read returns HAILO_INVALID_FRAME).
uint32_t HwReadElement::get_invalid_frames_count()
{
    return m_stream->get_invalid_frames_count();
}
-
-std::string HwReadElement::description() const
-{
- std::stringstream element_description;
- element_description << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";
-
- return element_description.str();
-}
-
// After deactivation, clear any abort state on the stream so it can be used
// again. HAILO_STREAM_NOT_ACTIVATED is tolerated (nothing to clear).
hailo_status HwReadElement::execute_post_deactivate()
{
    auto status = m_stream->clear_abort();
    CHECK(((HAILO_SUCCESS == status) || (HAILO_STREAM_NOT_ACTIVATED == status)), status,
        "Failed to clear abort stream in {}", name());
    return HAILO_SUCCESS;
}
-
// Nothing to clear for a hardware-read element.
hailo_status HwReadElement::execute_clear()
{
    return HAILO_SUCCESS;
}
-
// Flushing is a write-side operation; it is invalid on a read element.
hailo_status HwReadElement::execute_flush()
{
    return HAILO_INVALID_OPERATION;
}
-
// Abort the underlying stream; HAILO_STREAM_NOT_ACTIVATED is tolerated
// (an inactive stream has nothing to abort).
hailo_status HwReadElement::execute_abort()
{
    auto status = m_stream->abort();
    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
        "Failed to execute abort stream in {}", name());
    return HAILO_SUCCESS;
}
-
// Resume = undo a previous abort by clearing the stream's abort state;
// HAILO_STREAM_NOT_ACTIVATED is tolerated.
hailo_status HwReadElement::execute_resume()
{
    auto status = m_stream->clear_abort();
    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
        "Failed to execute resume stream in {}", name());
    return HAILO_SUCCESS;
}
-
// No pending asynchronous work to wait for on the read side.
hailo_status HwReadElement::execute_wait_for_finish()
{
    return HAILO_SUCCESS;
}
-
-std::vector<AccumulatorPtr> HwReadElement::get_queue_size_accumulators()
-{
- if (nullptr == m_pool->get_queue_size_accumulator()) {
- return std::vector<AccumulatorPtr>();
- }
- return {m_pool->get_queue_size_accumulator()};
-}
-
// A source element only produces data; pushing into it is invalid.
hailo_status HwReadElement::run_push(PipelineBuffer &&/*buffer*/)
{
    return HAILO_INVALID_OPERATION;
}
-
-Expected<PipelineBuffer> HwReadElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
-{
- auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
- return make_unexpected(buffer.status());
- }
- CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
-
- while (true) {
- if (!m_stream->is_scheduled()) {
- auto status = m_activation_wait_or_shutdown.wait(m_timeout);
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- if (HAILO_TIMEOUT == status) {
- return make_unexpected(HAILO_NETWORK_GROUP_NOT_ACTIVATED);
- }
- CHECK_SUCCESS_AS_EXPECTED(status);
- } else {
- auto status = m_activation_wait_or_shutdown.wait(std::chrono::milliseconds(0));
- if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
- return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
- }
- }
-
- MemoryView buffer_view(buffer.value().as_view());
- m_duration_collector.start_measurement();
- auto status = m_stream->read(buffer_view);
- if (HAILO_INVALID_FRAME == status) {
- m_stream->increase_invalid_frames_count(1);
- status = HAILO_SUCCESS;
- }
- if (HAILO_STREAM_NOT_ACTIVATED == status) {
- // Try again
- continue;
- }
- if (HAILO_STREAM_ABORTED_BY_USER == status) {
- LOGGER__INFO("Reading from stream was aborted!");
- return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
- }
- CHECK_SUCCESS_AS_EXPECTED(status, "{} (D2H) failed with status={}", name(), status);
- m_duration_collector.complete_measurement();
-
- // TODO: This is for rare cases where a transormation is needed before another pipeline element
- // Should be handled by the computational graph, and not here.
- if (m_transform_context) {
- auto transform_buffer = m_transform_pool->get_available_buffer(PipelineBuffer(), m_timeout);
- CHECK_EXPECTED(buffer);
- status = m_transform_context->transform(buffer_view, transform_buffer.value().as_view());
- CHECK_SUCCESS_AS_EXPECTED(status);
- return transform_buffer.release();
- }
-
- return buffer.release();
- }
-}
-
// No per-activation setup needed for the read element.
hailo_status HwReadElement::execute_activate()
{
    return HAILO_SUCCESS;
}
-
// Deactivation: wake any thread blocked in run_pull() via the shutdown event,
// then abort the stream so outstanding reads return. An abort failure (other
// than NOT_ACTIVATED) takes precedence; otherwise the signal status is
// propagated (it is logged, not returned early, so the abort still happens).
hailo_status HwReadElement::execute_deactivate()
{
    auto signal_shutdown_status = m_shutdown_event->signal();
    if (HAILO_SUCCESS != signal_shutdown_status) {
        LOGGER__ERROR("Signaling {} shutdown event failed with {}", name(), signal_shutdown_status);
    }

    auto abort_status = m_stream->abort();
    if ((HAILO_SUCCESS != abort_status) && (HAILO_STREAM_NOT_ACTIVATED != abort_status)) {
        LOGGER__ERROR("Abort {} failed with {}", name(), abort_status);
        return abort_status;
    }

    return signal_shutdown_status;
}
-
-Expected<std::shared_ptr<HwWriteElement>> HwWriteElement::create(std::shared_ptr<InputStream> stream, const std::string &name,
- hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
-
- auto duration_collector = DurationCollector::create(elem_flags);
- CHECK_EXPECTED(duration_collector);
-
- auto got_flush_event = Event::create_shared(Event::State::not_signalled);
- CHECK_AS_EXPECTED(nullptr != got_flush_event, HAILO_OUT_OF_HOST_MEMORY);
-
- auto hw_write_elem_ptr = make_shared_nothrow<HwWriteElement>(stream, name,
- duration_collector.release(), std::move(pipeline_status), got_flush_event);
- CHECK_AS_EXPECTED(nullptr != hw_write_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", hw_write_elem_ptr->name());
-
- return hw_write_elem_ptr;
-}
-
// Constructor: stores the input stream and the event used to signal that a
// flush request has been processed by run_push().
HwWriteElement::HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event) :
    SinkElement(name, std::move(duration_collector), std::move(pipeline_status)),
    m_stream(stream), m_got_flush_event(got_flush_event)
{}
-
// A sink element only consumes data; pulling from it is invalid.
Expected<PipelineBuffer> HwWriteElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
{
    return make_unexpected(HAILO_INVALID_OPERATION);
}
-
// Pushes one buffer to the hardware stream. A FLUSH-typed buffer triggers a
// stream flush and signals m_got_flush_event (which execute_flush() waits on)
// instead of writing data. HAILO_STREAM_ABORTED_BY_USER is returned quietly
// (info log only) since an abort is a user action, not an error.
hailo_status HwWriteElement::run_push(PipelineBuffer &&buffer)
{
    if (PipelineBuffer::Type::FLUSH == buffer.get_type()) {
        hailo_status flush_status = m_stream->flush();
        if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
            LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
        } else if (HAILO_SUCCESS != flush_status) {
            LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
        }
        // Wake the thread blocked in execute_flush() regardless of the flush
        // outcome, so it does not hang on a failed flush.
        hailo_status status = m_got_flush_event->signal();
        CHECK_SUCCESS(status);
        return HAILO_SUCCESS;
    }

    // Time only the actual H2D write for the element's duration stats.
    m_duration_collector.start_measurement();
    const auto status = m_stream->write(MemoryView(buffer.data(), buffer.size()));
    m_duration_collector.complete_measurement();

    if (HAILO_STREAM_ABORTED_BY_USER == status) {
        LOGGER__INFO("Failed to send on input stream {} because stream was aborted", m_stream->to_string());
        return HAILO_STREAM_ABORTED_BY_USER;
    }
    CHECK_SUCCESS(status, "{} (H2D) failed with status={}", name(), status);

    return HAILO_SUCCESS;
}
-
// No per-activation setup needed for the write element.
hailo_status HwWriteElement::execute_activate()
{
    return HAILO_SUCCESS;
}
-
// Deactivation: flush outstanding buffers, then abort the stream.
hailo_status HwWriteElement::execute_deactivate()
{
    // The flush operation will block until all buffers currently in the pipeline will be processed.
    // We assume that no buffers are sent after the call for deactivate.
    hailo_status flush_status = m_stream->flush();
    if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
        LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
        // TODO: HRT-3621
        return HAILO_SUCCESS;
    } else if (HAILO_SUCCESS != flush_status) {
        // Flush failure is logged but not returned — the abort below must
        // still run to release any blocked writers.
        LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
    }

    auto abort_status = m_stream->abort();
    CHECK(((abort_status == HAILO_SUCCESS) || (abort_status == HAILO_STREAM_NOT_ACTIVATED)), abort_status,
        "Failed to abort stream in {}", name());
    return HAILO_SUCCESS;
}
-
// After deactivation, clear the abort state so the stream is usable on the
// next activation; HAILO_STREAM_NOT_ACTIVATED is tolerated.
hailo_status HwWriteElement::execute_post_deactivate()
{
    auto status = m_stream->clear_abort();
    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
        "Failed to clear abort stream in {}", name());
    return HAILO_SUCCESS;
}
-
// Nothing to clear for the write element.
hailo_status HwWriteElement::execute_clear()
{
    return HAILO_SUCCESS;
}
-
// Waits (up to the stream's timeout) for run_push() to signal that the FLUSH
// buffer was processed, then resets the event for the next flush cycle.
hailo_status HwWriteElement::execute_flush()
{
    hailo_status status = m_got_flush_event->wait(m_stream->get_timeout());
    CHECK_SUCCESS(status);

    status = m_got_flush_event->reset();
    CHECK_SUCCESS(status);

    return HAILO_SUCCESS;
}
-
// Abort the input stream; HAILO_STREAM_NOT_ACTIVATED is tolerated.
hailo_status HwWriteElement::execute_abort()
{
    auto status = m_stream->abort();
    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
        "Failed to execute abort stream in {}", name());
    return HAILO_SUCCESS;
}
-
// Resume = clear a previous abort; HAILO_STREAM_NOT_ACTIVATED is tolerated.
hailo_status HwWriteElement::execute_resume()
{
    auto status = m_stream->clear_abort();
    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
        "Failed to execute resume stream in {}", name());
    return HAILO_SUCCESS;
}
-
// No pending asynchronous work to wait for on the write side.
hailo_status HwWriteElement::execute_wait_for_finish()
{
    return HAILO_SUCCESS;
}
-
-std::string HwWriteElement::description() const
-{
- std::stringstream element_description;
- element_description << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";
-
- return element_description.str();
-}
-
-Expected<std::shared_ptr<CopyBufferElement>> CopyBufferElement::create(const std::string &name,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
-{
- auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
- CHECK_EXPECTED(duration_collector);
- auto elem_ptr = make_shared_nothrow<CopyBufferElement>(name, duration_collector.release(), std::move(pipeline_status));
- CHECK_AS_EXPECTED(nullptr != elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
- LOGGER__INFO("Created {}", elem_ptr->name());
-
- return elem_ptr;
-}
-
// Constructor: simple FilterElement with no extra state of its own.
CopyBufferElement::CopyBufferElement(const std::string &name, DurationCollector &&duration_collector,
    std::shared_ptr<std::atomic<hailo_status>> pipeline_status) :
    FilterElement(name, std::move(duration_collector), std::move(pipeline_status))
{}
-
// Returns the pad of the element that should run next.
PipelinePad &CopyBufferElement::next_pad()
{
    // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
    return *m_sinks[0].prev();
}
-
// Copies the input buffer's bytes into the caller-supplied 'optional' buffer
// and returns it. Both the presence of 'optional' and a matching size are
// required (HAILO_INVALID_ARGUMENT otherwise).
Expected<PipelineBuffer> CopyBufferElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
{
    CHECK_AS_EXPECTED(optional, HAILO_INVALID_ARGUMENT, "Optional buffer must be passed to CopyBufferElement!");

    CHECK_AS_EXPECTED(optional.size() == input.size(), HAILO_INVALID_ARGUMENT, "Optional buffer size does not equal to the input buffer size!");
    memcpy(optional.data(), input.data(), optional.size());

    return std::move(optional);
}
-
// Convenience overload: builds default vstream params from the quantized flag
// and format type, then delegates to the params-based overload.
Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> VStreamsBuilder::create_vstreams(
    ConfiguredNetworkGroup &net_group, bool quantized, hailo_format_type_t format_type,
    const std::string &network_name)
{
    const auto params = HailoRTDefaults::get_vstreams_params(quantized, format_type);
    return create_vstreams(net_group, params, network_name);
}
-
-Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> VStreamsBuilder::create_vstreams(
- ConfiguredNetworkGroup &net_group, const hailo_vstream_params_t &vstreams_params,
- const std::string &network_name)
-{
- std::map<std::string, hailo_vstream_params_t> vstreams_params_by_input_stream_name;
- auto input_vstream_params = net_group.make_input_vstream_params(true, HAILO_FORMAT_TYPE_AUTO,
- HAILO_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_name);
- CHECK_EXPECTED(input_vstream_params);
-
- for (auto params_pair : input_vstream_params.release()) {
- vstreams_params_by_input_stream_name.emplace(std::make_pair(params_pair.first, vstreams_params));
- }
-
- auto expected_all_inputs = create_input_vstreams(net_group, vstreams_params_by_input_stream_name);
- CHECK_EXPECTED(expected_all_inputs);
-
- std::map<std::string, hailo_vstream_params_t> vstreams_params_by_output_stream_name;
- auto output_vstream_params = net_group.make_output_vstream_params(true, HAILO_FORMAT_TYPE_AUTO,
- HAILO_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_name);
- CHECK_EXPECTED(output_vstream_params);
-
- for (auto params_pair : output_vstream_params.release()) {
- vstreams_params_by_output_stream_name.emplace(std::make_pair(params_pair.first, vstreams_params));
- }
-
- auto expected_all_outputs = create_output_vstreams(net_group, vstreams_params_by_output_stream_name);
- CHECK_EXPECTED(expected_all_outputs);
-
- return std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>(
- expected_all_inputs.release(), expected_all_outputs.release());
-}
-
-static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_info_t &stream_info,
- const hailo_vstream_params_t &vstream_params)
-{
- auto local_vstream_params = vstream_params;
- local_vstream_params.user_buffer_format = HailoRTDefaults::expand_auto_format(vstream_params.user_buffer_format,
- stream_info.format);
- return local_vstream_params;
-}
-
// Thin forwarder to the network group's own input-vstream creation.
Expected<std::vector<InputVStream>> VStreamsBuilder::create_input_vstreams(ConfiguredNetworkGroup &net_group,
    const std::map<std::string, hailo_vstream_params_t> &inputs_params)
{
    return net_group.create_input_vstreams(inputs_params);
}
-
// Thin forwarder to the network group's own output-vstream creation.
Expected<std::vector<OutputVStream>> VStreamsBuilder::create_output_vstreams(ConfiguredNetworkGroup &net_group,
    const std::map<std::string, hailo_vstream_params_t> &outputs_params)
{
    return net_group.create_output_vstreams(outputs_params);
}
-
// Builds the input-side pipeline for one input stream and wraps it in an
// InputVStream. When a host-side transformation is required the pipeline is
// PreInferElement -> PushQueueElement -> HwWriteElement; otherwise it is just
// the HwWriteElement. Returns a single-element vector of InputVStream.
Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &vstream_info,
    const hailo_vstream_params_t &vstream_params)
{
    // TODO (HRT-4522): Support this measurement
    CHECK_AS_EXPECTED(!(vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_FPS), HAILO_NOT_IMPLEMENTED,
        "Pipeline FPS statistics measurement is not implemented");

    std::vector<std::shared_ptr<PipelineElement>> elements;
    std::vector<InputVStream> vstreams;

    // Scheduled streams have no single activation event to wait on.
    EventPtr network_group_activated_event = nullptr;
    if (!input_stream->is_scheduled()) {
        network_group_activated_event = input_stream->get_network_group_activated_event();
    }

    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);

    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);

    auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
    CHECK_EXPECTED(pipeline_latency_accumulator);

    auto user_timeout = std::chrono::milliseconds(vstream_params.timeout_ms);

    // The hardware write element is always the sink; elements are inserted at
    // the front so the vector ends up ordered source -> sink.
    auto hw_write_elem = HwWriteElement::create(input_stream,
        PipelineObject::create_element_name("HwWriteElement", input_stream->name(), input_stream->get_info().index),
        vstream_params.pipeline_elements_stats_flags, pipeline_status);
    CHECK_EXPECTED(hw_write_elem);
    elements.insert(elements.begin(), hw_write_elem.value());

    auto should_transform = InputTransformContext::is_transformation_required(input_stream->get_info().shape,
        vstream_params.user_buffer_format, input_stream->get_info().hw_shape, input_stream->get_info().format,
        input_stream->get_info().quant_info);

    if (should_transform) {
        std::shared_ptr<SinkElement> elem_after_post_infer = hw_write_elem.value();
        auto queue_elem = PushQueueElement::create(
            PipelineObject::create_element_name("PushQueueElement", input_stream->get_info().name, input_stream->get_info().index),
            vstream_params, shutdown_event, pipeline_status);
        CHECK_EXPECTED(queue_elem);
        elements.insert(elements.begin(), queue_elem.value());
        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(queue_elem.value(), hw_write_elem.value()));

        auto pre_infer_elem = PreInferElement::create(input_stream->get_info().shape, vstream_params.user_buffer_format,
            input_stream->get_info().hw_shape, input_stream->get_info().format, input_stream->get_info().quant_info,
            PipelineObject::create_element_name("PreInferElement", input_stream->get_info().name, input_stream->get_info().index),
            vstream_params, shutdown_event, pipeline_status);
        CHECK_EXPECTED(pre_infer_elem);
        elements.insert(elements.begin(), pre_infer_elem.value());
        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_infer_elem.value(), queue_elem.value()));

        input_stream->set_timeout(user_timeout);
        auto vstream = InputVStream::create(vstream_info, vstream_params, pre_infer_elem.release(), hw_write_elem.release(), std::move(elements),
            std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
        CHECK_EXPECTED(vstream);
        vstreams.emplace_back(vstream.release());
    } else {
        // No transformation: the write element is both entry and exit.
        input_stream->set_timeout(user_timeout);
        auto vstream = InputVStream::create(vstream_info, vstream_params, hw_write_elem.value(), hw_write_elem.value(), std::move(elements),
            std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
        CHECK_EXPECTED(vstream);
        vstreams.emplace_back(vstream.release());
    }

    for (const auto &vstream : vstreams) {
        LOGGER__INFO("{}", vstream.get_pipeline_description());
    }

    return vstreams;
}
-
// Builds the output-side pipeline for one output stream. A mux stream is
// expanded into multiple vstreams via add_demux(); otherwise a single vstream
// is built, optionally with PullQueue -> PostInfer -> UserBufferQueue when a
// host-side transformation is required.
Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::shared_ptr<OutputStream> output_stream,
    NameToVStreamParamsMap &vstreams_params_map, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
{
    std::vector<std::shared_ptr<PipelineElement>> elements;
    std::vector<OutputVStream> vstreams;

    // Scheduled streams have no single activation event to wait on.
    EventPtr network_group_activated_event = nullptr;
    if (!output_stream->is_scheduled()) {
        network_group_activated_event = output_stream->get_network_group_activated_event();
    }

    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);

    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);

    assert(!vstreams_params_map.empty());

    // Note: In case of multiple values in vstreams_params_map (e.g. in the case of demux), we'll set the
    // pipeline_elements_stats_flags for the hw_read_element as bitwise or of all the flags.
    hailo_pipeline_elem_stats_flags_t hw_read_element_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
    hailo_vstream_stats_flags_t hw_read_stream_stats_flags = HAILO_VSTREAM_STATS_NONE;
    size_t buffer_pool_size = 0;
    for (const auto &elem_name_params : vstreams_params_map) {
        hw_read_element_stats_flags |= elem_name_params.second.pipeline_elements_stats_flags;
        hw_read_stream_stats_flags |= elem_name_params.second.vstream_stats_flags;
        buffer_pool_size += elem_name_params.second.queue_size;
    }

    // TODO (HRT-4522): Support this measurement
    CHECK_AS_EXPECTED(!(hw_read_stream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_FPS), HAILO_NOT_IMPLEMENTED,
        "Pipeline FPS statistics measurement is not implemented");

    auto hw_read_elem = HwReadElement::create(output_stream,
        PipelineObject::create_element_name("HwReadElement", output_stream->name(), output_stream->get_info().index),
        HAILO_INFINITE_TIMEOUT, buffer_pool_size, hw_read_element_stats_flags, hw_read_stream_stats_flags, shutdown_event, pipeline_status);
    CHECK_EXPECTED(hw_read_elem);
    elements.push_back(hw_read_elem.value());

    if (output_stream->get_info().is_mux) {
        // Mux stream: one hardware stream fans out to several vstreams.
        hailo_status status = add_demux(output_stream, vstreams_params_map, std::move(elements), vstreams, hw_read_elem.value(),
            shutdown_event, pipeline_status, output_vstream_infos);
        CHECK_SUCCESS_AS_EXPECTED(status);
    } else {
        auto vstream_info = output_vstream_infos.find(output_stream->name());
        CHECK_AS_EXPECTED(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
            "Failed to find vstream info of {}", output_stream->name());

        assert(1 == vstreams_params_map.size());
        auto vstream_params = expand_vstream_params_autos(output_stream->get_info(), vstreams_params_map.begin()->second);

        auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
        CHECK_EXPECTED(pipeline_latency_accumulator);

        auto should_transform = OutputTransformContext::is_transformation_required(output_stream->get_info().hw_shape,
            output_stream->get_info().format, output_stream->get_info().shape,
            vstream_params.user_buffer_format, output_stream->get_info().quant_info);

        if (should_transform) {
            auto hw_read_queue_elem = PullQueueElement::create(
                PipelineObject::create_element_name("PullQueueElement_hw_read", output_stream->name(), output_stream->get_info().index),
                vstream_params, shutdown_event, pipeline_status);
            CHECK_EXPECTED(hw_read_queue_elem);
            elements.push_back(hw_read_queue_elem.value());
            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_elem.value(), hw_read_queue_elem.value()));

            auto post_infer_elem = PostInferElement::create(output_stream->get_info().hw_shape, output_stream->get_info().format,
                output_stream->get_info().shape, vstream_params.user_buffer_format, output_stream->get_info().quant_info, output_stream->get_info().nms_info,
                PipelineObject::create_element_name("PostInferElement", output_stream->name(), output_stream->get_info().index),
                vstream_params, pipeline_status);
            CHECK_EXPECTED(post_infer_elem);
            elements.push_back(post_infer_elem.value());
            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_queue_elem.value(), post_infer_elem.value()));

            auto post_infer_queue_elem = UserBufferQueueElement::create(
                PipelineObject::create_element_name("UserBufferQueueElement_post_infer", output_stream->name(), output_stream->get_info().index),
                vstream_params, shutdown_event, pipeline_status);
            CHECK_EXPECTED(post_infer_queue_elem);
            elements.push_back(post_infer_queue_elem.value());
            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));

            // Internal stages wait forever; the user-facing timeout is applied
            // at the UserBufferQueueElement via vstream_params.
            output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
            hw_read_queue_elem->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, post_infer_queue_elem.release(), std::move(elements),
                std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
            CHECK_EXPECTED(vstream);
            vstreams.emplace_back(vstream.release());
        } else {
            output_stream->set_timeout(std::chrono::milliseconds(vstream_params.timeout_ms));
            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, hw_read_elem.release(), std::move(elements),
                std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
            CHECK_EXPECTED(vstream);
            vstreams.emplace_back(vstream.release());
        }
    }

    for (const auto &vstream : vstreams) {
        LOGGER__INFO("{}", vstream.get_pipeline_description());
    }

    return vstreams;
}
-
// Wraps an internal input-vstream implementation in the public InputVStream.
InputVStream VStreamsBuilderUtils::create_input(std::shared_ptr<InputVStreamInternal> input_vstream)
{
    return InputVStream(std::move(input_vstream));
}
-
// Wraps an internal output-vstream implementation in the public OutputVStream.
OutputVStream VStreamsBuilderUtils::create_output(std::shared_ptr<OutputVStreamInternal> output_vstream)
{
    return OutputVStream(std::move(output_vstream));
}
-
-static bool are_formats_equal(const hailo_format_t &format1, const hailo_format_t &format2) {
- return ((format1.order == format2.order) && (format1.flags == format2.flags) && (format1.type == format2.type));
-}
-
// Builds the output vstreams for a set of NMS streams that are fused into one
// virtual output. All streams must share the same format; the heavy lifting
// is delegated to add_nms_fuse().
Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_nms(OutputStreamPtrVector &output_streams,
    hailo_vstream_params_t vstreams_params,
    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
{
    // Compare every stream's format against the first one (the first iteration
    // trivially compares it with itself).
    for (const auto &out_stream : output_streams) {
        CHECK_AS_EXPECTED(are_formats_equal(output_streams[0]->get_info().format, out_stream->get_info().format),
            HAILO_INVALID_ARGUMENT, "All nms streams of the same virtual output must have the same format");
    }

    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);

    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);

    std::vector<std::shared_ptr<PipelineElement>> elements;
    std::vector<OutputVStream> vstreams;

    hailo_status status = add_nms_fuse(output_streams, vstreams_params, elements, vstreams, shutdown_event,
        pipeline_status, output_vstream_infos);
    CHECK_SUCCESS_AS_EXPECTED(status);

    for (const auto &vstream : vstreams) {
        LOGGER__INFO("{}", vstream.get_pipeline_description());
    }

    return vstreams;
}
-
// Builds output vstreams for host-side YOLO NMS post-processing. Requires
// exactly three core output streams; they are sorted by stream name so the
// post-process sees them in a deterministic order. Delegates the pipeline
// construction to add_nms_post_process().
Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_nms(OutputStreamPtrVector &output_streams,
    hailo_vstream_params_t vstreams_params,
    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
    const NetFlowYoloNmsElement &nms_op)
{
    static const auto EXPECTED_OUTPUTS = 3;
    CHECK_AS_EXPECTED(output_streams.size() == EXPECTED_OUTPUTS,
        HAILO_INVALID_ARGUMENT, "Core expected to have exactly {} outputs when using NMS post-processing", EXPECTED_OUTPUTS);

    // Deterministic ordering by stream name (lexicographic).
    std::sort(output_streams.begin(), output_streams.end(), [](auto &stream_0, auto &stream_1) {
        std::string name0(stream_0->get_info().name);
        std::string name1(stream_1->get_info().name);
        return name0 < name1;
    });

    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);

    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);

    std::vector<std::shared_ptr<PipelineElement>> elements;
    std::vector<OutputVStream> vstreams;

    hailo_status status = add_nms_post_process(output_streams, vstreams_params, elements, vstreams, shutdown_event,
        pipeline_status, output_vstream_infos, nms_op);
    CHECK_SUCCESS_AS_EXPECTED(status);

    for (const auto &vstream : vstreams) {
        LOGGER__INFO("{}", vstream.get_pipeline_description());
    }

    return vstreams;
}
-
// Expands a mux output stream into one vstream per demuxed edge. Creates a
// TransformDemuxElement fed by hw_read_elem; each edge then gets its own
// PullQueueElement followed by either PostInfer + UserBufferQueue (when a
// transformation is required) or a CopyBufferElement. All resulting vstreams
// share the same pipeline_status. New vstreams are appended to 'vstreams'.
hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
    std::vector<std::shared_ptr<PipelineElement>> &&base_elements, std::vector<OutputVStream> &vstreams,
    std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
    const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
{
    auto expected_demuxer = OutputDemuxer::create(*output_stream);
    CHECK_EXPECTED_AS_STATUS(expected_demuxer);

    std::shared_ptr<OutputDemuxer> demuxer_ptr = expected_demuxer.release();
    CHECK(nullptr != demuxer_ptr, HAILO_OUT_OF_HOST_MEMORY);

    // The hardware stream itself waits forever; user timeouts are enforced at
    // the per-edge queue elements.
    auto status = output_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
    CHECK_SUCCESS(status);

    // Note: In case of multiple values in vstreams_params_map (e.g. in the case of demux), we'll set the
    // pipeline_elements_stats_flags for the demux_elem as bitwise or of all the flags.
    hailo_pipeline_elem_stats_flags_t demux_elem_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
    hailo_vstream_stats_flags_t demux_vstream_stats_flags = HAILO_VSTREAM_STATS_NONE;
    size_t buffer_pool_size = 0;
    for (const auto &elem_name_params : vstreams_params_map) {
        demux_elem_stats_flags |= elem_name_params.second.pipeline_elements_stats_flags;
        demux_vstream_stats_flags |= elem_name_params.second.vstream_stats_flags;
        buffer_pool_size += elem_name_params.second.queue_size;
    }

    auto demux_elem = TransformDemuxElement::create(demuxer_ptr,
        PipelineObject::create_element_name("TransformDemuxElement", output_stream->name(), output_stream->get_info().index),
        std::chrono::milliseconds(HAILO_INFINITE), buffer_pool_size, demux_elem_stats_flags, demux_vstream_stats_flags, shutdown_event, pipeline_status);
    CHECK_EXPECTED_AS_STATUS(demux_elem);
    base_elements.push_back(demux_elem.value());
    CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem, demux_elem.value()));

    // Scheduled streams have no single activation event to wait on.
    EventPtr network_group_activated_event = nullptr;
    if (!output_stream->is_scheduled()) {
        network_group_activated_event = output_stream->get_network_group_activated_event();
    }

    // 'i' is the demux source-pad index for the current edge.
    uint32_t i = 0;
    for (auto &edge_info : demuxer_ptr->get_edges_stream_info()) {
        auto name_params_pair = vstreams_params_map.find(edge_info.name);
        CHECK(name_params_pair != vstreams_params_map.end(), HAILO_NOT_FOUND,
            "Failed to find vstreams params of edge {}", edge_info.name);

        const auto vstream_info = output_vstream_infos.find(edge_info.name);
        CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
            "Failed to find vstream info of {}", edge_info.name);

        const auto vstream_params = expand_vstream_params_autos(output_stream->get_info(), name_params_pair->second);

        // For each mux vstream, we create a copy of the previous elements
        auto current_vstream_elements = base_elements;

        // For muxed VStreams we use the same pipeline_status for all
        auto pipeline_status_copy = pipeline_status;
        auto demux_queue_elem = PullQueueElement::create(
            PipelineObject::create_element_name("PullQueueElement_demux", edge_info.name, edge_info.index),
            vstream_params, shutdown_event, pipeline_status);
        CHECK_EXPECTED_AS_STATUS(demux_queue_elem);
        current_vstream_elements.push_back(demux_queue_elem.value());
        CHECK_SUCCESS(PipelinePad::link_pads(demux_elem.value(), demux_queue_elem.value(), i, 0));

        demux_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);

        auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
        CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);

        auto should_transform = OutputTransformContext::is_transformation_required(edge_info.hw_shape,
            edge_info.format, edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info);

        if (should_transform) {
            auto post_infer_elem = PostInferElement::create(edge_info.hw_shape, edge_info.format,
                edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info, edge_info.nms_info,
                PipelineObject::create_element_name("PostInferElement", edge_info.name, edge_info.index),
                vstream_params, pipeline_status);
            CHECK_EXPECTED_AS_STATUS(post_infer_elem);
            current_vstream_elements.push_back(post_infer_elem.value());
            CHECK_SUCCESS(PipelinePad::link_pads(demux_queue_elem.value(), post_infer_elem.value()));

            auto post_infer_queue_elem = UserBufferQueueElement::create(
                PipelineObject::create_element_name("UserBufferQueueElement_post_infer", edge_info.name, edge_info.index),
                vstream_params, shutdown_event, pipeline_status);
            CHECK_EXPECTED_AS_STATUS(post_infer_queue_elem);
            current_vstream_elements.push_back(post_infer_queue_elem.value());
            CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));

            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, post_infer_queue_elem.release(), std::move(current_vstream_elements),
                std::move(pipeline_status_copy), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
            CHECK_EXPECTED_AS_STATUS(vstream);
            vstreams.emplace_back(vstream.release());
        } else {
            // TODO: HRT-4179
            auto user_copy_elem = CopyBufferElement::create(
                PipelineObject::create_element_name("CopyBufferElement", edge_info.name, edge_info.index),
                pipeline_status);
            CHECK_EXPECTED_AS_STATUS(user_copy_elem);
            current_vstream_elements.push_back(user_copy_elem.value());
            CHECK_SUCCESS(PipelinePad::link_pads(demux_queue_elem.value(), user_copy_elem.value()));

            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, user_copy_elem.release(), std::move(current_vstream_elements),
                std::move(pipeline_status_copy), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
            CHECK_EXPECTED_AS_STATUS(vstream);
            vstreams.emplace_back(vstream.release());
        }
        i++;
    }
    return HAILO_SUCCESS;
}
-
-hailo_status VStreamsBuilderUtils::add_nms_fuse(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
- std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos)
-{
- std::vector<hailo_nms_info_t> nms_infos;
- nms_infos.reserve(output_streams.size());
- for (const auto &out_stream : output_streams) {
- CHECK(out_stream->get_info().nms_info.defuse_info.class_group_index <= output_streams.size(),
- HAILO_INVALID_ARGUMENT, "Not all defused nms outputs were grouped correctly!");
- nms_infos.emplace_back(out_stream->get_info().nms_info);
- }
-
- // To get the fused layer name and src stream format, we use the stream info of one of the defuses
- auto first_defused_stream_info = output_streams[0]->get_info();
- auto fused_layer_name = first_defused_stream_info.nms_info.defuse_info.original_name;
- auto src_stream_format = first_defused_stream_info.format;
-
- auto vstream_info = output_vstream_infos.find(fused_layer_name);
- CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
- "Failed to find vstream info of {}", fused_layer_name);
-
- vstreams_params = expand_vstream_params_autos(first_defused_stream_info, vstreams_params);
- auto nms_elem = NmsMuxElement::create(nms_infos,
- PipelineObject::create_element_name("NmsMuxElement", fused_layer_name, 0),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(nms_elem);
- auto fused_layer_nms_info = nms_elem.value()->get_fused_nms_info();
-
- for (uint32_t i = 0; i < output_streams.size(); ++i) {
- const auto &curr_stream_info = output_streams[i]->get_info();
- output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT);
-
- auto hw_read_elem = HwReadElement::create(output_streams[i],
- PipelineObject::create_element_name("HwReadElement", curr_stream_info.name, curr_stream_info.index),
- HAILO_INFINITE_TIMEOUT, vstreams_params.queue_size, vstreams_params.pipeline_elements_stats_flags,
- vstreams_params.vstream_stats_flags, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(hw_read_elem);
- elements.push_back(hw_read_elem.value());
-
- auto nms_source_queue_elem = PullQueueElement::create(
- PipelineObject::create_element_name("PullQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(nms_source_queue_elem);
- elements.push_back(nms_source_queue_elem.value());
- nms_source_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
- CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem.value(), nms_source_queue_elem.value()));
- CHECK_SUCCESS(PipelinePad::link_pads(nms_source_queue_elem.value(), nms_elem.value(), 0, i));
- }
- elements.push_back(nms_elem.value());
-
- auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstreams_params);
- CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
-
- auto should_transform = OutputTransformContext::is_transformation_required({}, src_stream_format, {},
- vstreams_params.user_buffer_format, vstream_info->second.quant_info);
-
- EventPtr network_group_activated_event = nullptr;
- if (!output_streams[0]->is_scheduled()) {
- network_group_activated_event = output_streams[0]->get_network_group_activated_event();
- }
-
- if (should_transform) {
- auto nms_queue_elem = PullQueueElement::create(
- PipelineObject::create_element_name("PullQueueElement_nms", fused_layer_name, 0),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(nms_queue_elem);
- nms_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
- elements.push_back(nms_queue_elem.value());
- CHECK_SUCCESS(PipelinePad::link_pads(nms_elem.value(), nms_queue_elem.value()));
-
- auto post_infer_elem = PostInferElement::create({}, src_stream_format,
- {}, vstreams_params.user_buffer_format, vstream_info->second.quant_info, fused_layer_nms_info,
- PipelineObject::create_element_name("PostInferElement", fused_layer_name, 0), vstreams_params, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(post_infer_elem);
-
- elements.push_back(post_infer_elem.value());
- CHECK_SUCCESS(PipelinePad::link_pads(nms_queue_elem.value(), post_infer_elem.value()));
-
- auto post_infer_queue_elem = UserBufferQueueElement::create(
- PipelineObject::create_element_name("UserBufferQueueElement_post_infer", fused_layer_name, 0),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(post_infer_queue_elem);
- elements.push_back(post_infer_queue_elem.value());
- CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
-
- auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, post_infer_queue_elem.release(), std::move(elements),
- std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
- CHECK_EXPECTED_AS_STATUS(vstream);
- vstreams.emplace_back(vstream.release());
- } else {
- auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
- std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
- CHECK_EXPECTED_AS_STATUS(vstream);
- vstreams.emplace_back(vstream.release());
- }
-
- return HAILO_SUCCESS;
-}
-
-hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
- std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
- const NetFlowYoloNmsElement &nms_op)
-{
- auto first_stream_info = output_streams[0]->get_info();
- if (vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_AUTO) {
- vstreams_params.user_buffer_format.type = HAILO_FORMAT_TYPE_FLOAT32;
- }
- if (vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_AUTO) {
- vstreams_params.user_buffer_format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
- }
- vstreams_params = expand_vstream_params_autos(first_stream_info, vstreams_params);
- CHECK(vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_FLOAT32, HAILO_INVALID_ARGUMENT,
- "NMS output format type must be HAILO_FORMAT_TYPE_FLOAT32");
- CHECK(vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_HAILO_NMS, HAILO_INVALID_ARGUMENT,
- "NMS output format order must be HAILO_FORMAT_ORDER_HAILO_NMS");
-
- hailo_nms_info_t nms_info = {
- nms_op.classes,
- nms_op.max_proposals_per_class,
- sizeof(hailo_bbox_float32_t),
- nms_op.input_division_factor,
- false,
- hailo_nms_defuse_info_t()
- };
-
- std::vector<hailo_3d_image_shape_t> shapes;
- shapes.reserve(output_streams.size());
- std::vector<hailo_format_t> formats;
- formats.reserve(output_streams.size());
- std::vector<hailo_quant_info_t> quant_infos;
- quant_infos.reserve(output_streams.size());
- for (uint32_t i = 0; i < output_streams.size(); ++i) {
- const auto &curr_stream_info = output_streams[i]->get_info();
- shapes.push_back(curr_stream_info.shape);
- formats.push_back(curr_stream_info.format);
- quant_infos.push_back(curr_stream_info.quant_info);
- }
-
- const auto &output_pads = nms_op.output_pads;
- assert(output_pads.size() == 1);
- auto vstream_info = output_vstream_infos.find(output_pads[0].name);
- CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
- "Failed to find vstream info of {}", nms_op.name);
-
- auto nms_elem = NmsPostProcessMuxElement::create(nms_op, shapes, formats, quant_infos, nms_info,
- PipelineObject::create_element_name("NmsPostProcessMuxElement", nms_op.name, 0),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(nms_elem);
-
- hailo_format_t nms_src_format;
- nms_src_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
- nms_src_format.order = HAILO_FORMAT_ORDER_NHWC;
- nms_src_format.type = first_stream_info.format.type;
-
- for (uint32_t i = 0; i < output_streams.size(); ++i) {
- const auto &curr_stream_info = output_streams[i]->get_info();
- output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT);
-
- auto should_transform = OutputTransformContext::is_transformation_required(curr_stream_info.hw_shape, curr_stream_info.format,
- curr_stream_info.shape, nms_src_format, vstream_info->second.quant_info);
-
- std::unique_ptr<OutputTransformContext> transform_context = nullptr;
-
- if (should_transform) {
- auto expected_transform_context = OutputTransformContext::create(curr_stream_info.hw_shape, curr_stream_info.format,
- curr_stream_info.shape, nms_src_format, vstream_info->second.quant_info, nms_info);
- CHECK_EXPECTED_AS_STATUS(expected_transform_context);
- transform_context = expected_transform_context.release();
- }
-
- auto hw_read_elem = HwReadElement::create(output_streams[i],
- PipelineObject::create_element_name("HwReadElement", curr_stream_info.name, curr_stream_info.index),
- HAILO_INFINITE_TIMEOUT, vstreams_params.queue_size, vstreams_params.pipeline_elements_stats_flags,
- vstreams_params.vstream_stats_flags, shutdown_event, pipeline_status, std::move(transform_context));
- CHECK_EXPECTED_AS_STATUS(hw_read_elem);
- elements.push_back(hw_read_elem.value());
-
- auto nms_source_queue_elem = PullQueueElement::create(
- PipelineObject::create_element_name("PullQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
- vstreams_params, shutdown_event, pipeline_status);
- CHECK_EXPECTED_AS_STATUS(nms_source_queue_elem);
- nms_source_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
- elements.push_back(nms_source_queue_elem.value());
- CHECK_SUCCESS(PipelinePad::link_pads(hw_read_elem.value(), nms_source_queue_elem.value()));
- CHECK_SUCCESS(PipelinePad::link_pads(nms_source_queue_elem.value(), nms_elem.value(), 0, i));
- }
- elements.push_back(nms_elem.value());
-
- auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstreams_params);
- CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
-
- EventPtr network_group_activated_event = nullptr;
- if (!output_streams[0]->is_scheduled()) {
- network_group_activated_event = output_streams[0]->get_network_group_activated_event();
- }
-
- auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
- std::move(pipeline_status), shutdown_event, network_group_activated_event, pipeline_latency_accumulator.release());
- CHECK_EXPECTED_AS_STATUS(vstream);
- vstreams.emplace_back(vstream.release());
-
- return HAILO_SUCCESS;
-}
-
-Expected<AccumulatorPtr> VStreamsBuilderUtils::create_pipeline_latency_accumulator(const hailo_vstream_params_t &vstreams_params)
-{
- AccumulatorPtr pipeline_latency_accumulator = nullptr;
- const auto measure_latency = ((vstreams_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0);
- if (measure_latency) {
- pipeline_latency_accumulator = make_shared_nothrow<FullAccumulator<double>>("latency");
- CHECK_AS_EXPECTED(nullptr != pipeline_latency_accumulator, HAILO_OUT_OF_HOST_MEMORY);
- }
-
- return pipeline_latency_accumulator;
-}
-
-} /* namespace hailort */
+++ /dev/null
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vstream.hpp
- * @brief Virtual Stream.
- * Hence, the hierarchy is as follows:
- * ------------------------------------------------------------------------------------------------
- * | BaseVStream | (Internal "interface")
- * | ___________________________|___________________________ |
- * | / \ |
- * | InputVStreamInternal OutputVStreamInternal | (Base classes)
- * | / \ / \ |
- * | InputVStreamImpl InputVStreamClient OuputVStreamImpl OutputVStreamClient | (Actual implementations)
- * ------------------------------------------------------------------------------------------------
- * -- InputVStream (External 'interface')
- * |
- * |__ std::share_ptr<InputVStreamInternal>
- *
- * -- OutputVStream (External 'interface')
- * |
- * |__ std::share_ptr<OutputVStreamInternal>
- **/
-
-#ifndef _HAILO_VSTREAM_INTERNAL_HPP_
-#define _HAILO_VSTREAM_INTERNAL_HPP_
-
-#include "pipeline.hpp"
-#include "hef_internal.hpp"
-#include "net_flow/ops/yolo_post_processing.hpp"
-#include "hailo/transform.hpp"
-#include "hailo/stream.hpp"
-#include "context_switch/network_group_internal.hpp"
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-#include "hailort_rpc_client.hpp"
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-namespace hailort
-{
-
-/*! Virtual stream base class */
-class BaseVStream
-{
-public:
- BaseVStream(BaseVStream &&other) noexcept;
- BaseVStream& operator=(BaseVStream &&other) noexcept;
- virtual ~BaseVStream() = default;
-
- virtual size_t get_frame_size() const;
- virtual const hailo_vstream_info_t &get_info() const;
- virtual const hailo_format_t &get_user_buffer_format() const;
- virtual std::string name() const;
- virtual std::string network_name() const;
- virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const;
- virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const;
- virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const;
- virtual AccumulatorPtr get_pipeline_latency_accumulator() const;
- virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
-
- virtual hailo_status abort();
- virtual hailo_status resume();
- virtual hailo_status start_vstream();
- virtual hailo_status stop_vstream();
- virtual hailo_status stop_and_clear();
-
-protected:
- BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr &&network_group_activated_event, hailo_status &output_status);
- BaseVStream() = default;
-
- virtual std::string get_pipeline_description() const = 0;
-
- hailo_vstream_info_t m_vstream_info;
- hailo_vstream_params_t m_vstream_params;
- bool m_measure_pipeline_latency;
- std::shared_ptr<PipelineElement> m_entry_element;
- std::vector<std::shared_ptr<PipelineElement>> m_pipeline;
- volatile bool m_is_activated;
- volatile bool m_is_aborted;
- std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;
- EventPtr m_shutdown_event;
- EventPtr m_network_group_activated_event;
- std::map<std::string, AccumulatorPtr> m_fps_accumulators;
- std::map<std::string, AccumulatorPtr> m_latency_accumulators;
- std::map<std::string, std::vector<AccumulatorPtr>> m_queue_size_accumulators;
- AccumulatorPtr m_pipeline_latency_accumulator;
-};
-
-/*! Input virtual stream, used to stream data to device */
-class InputVStreamInternal : public BaseVStream
-{
-public:
- static Expected<std::shared_ptr<InputVStreamInternal>> create(const hailo_vstream_info_t &vstream_info,
- const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
- std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
- AccumulatorPtr pipeline_latency_accumulator);
- InputVStreamInternal(InputVStreamInternal &&other) noexcept = default;
- InputVStreamInternal &operator=(InputVStreamInternal &&other) noexcept = default;
- virtual ~InputVStreamInternal() = default;
-
- virtual hailo_status write(const MemoryView &buffer) = 0;
- virtual hailo_status flush() = 0;
-
- virtual std::string get_pipeline_description() const override;
-
-protected:
- InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr &&network_group_activated_event, hailo_status &output_status);
- InputVStreamInternal() = default;
-};
-
-/*! Output virtual stream, used to read data from device */
-class OutputVStreamInternal : public BaseVStream
-{
-public:
- static Expected<std::shared_ptr<OutputVStreamInternal>> create(
- const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator);
- OutputVStreamInternal(OutputVStreamInternal &&other) noexcept = default;
- OutputVStreamInternal &operator=(OutputVStreamInternal &&other) noexcept = default;
- virtual ~OutputVStreamInternal() = default;
-
-
- virtual hailo_status read(MemoryView buffer) = 0;
- virtual std::string get_pipeline_description() const override;
-
-protected:
- OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status);
- OutputVStreamInternal() = default;
-};
-
-class InputVStreamImpl : public InputVStreamInternal
-{
-public:
- static Expected<std::shared_ptr<InputVStreamImpl>> create(const hailo_vstream_info_t &vstream_info,
- const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
- std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr network_group_activated_event,
- AccumulatorPtr pipeline_latency_accumulator);
- InputVStreamImpl(InputVStreamImpl &&) noexcept = default;
- InputVStreamImpl(const InputVStreamImpl &) = delete;
- InputVStreamImpl &operator=(InputVStreamImpl &&) noexcept = default;
- InputVStreamImpl &operator=(const InputVStreamImpl &) = delete;
- virtual ~InputVStreamImpl();
-
- virtual hailo_status write(const MemoryView &buffer) override;
- virtual hailo_status flush() override;
-private:
- InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status);
-};
-
-class OutputVStreamImpl : public OutputVStreamInternal
-{
-public:
- static Expected<std::shared_ptr<OutputVStreamImpl>> create(
- const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
- EventPtr network_group_activated_event, AccumulatorPtr pipeline_latency_accumulator);
- OutputVStreamImpl(OutputVStreamImpl &&) noexcept = default;
- OutputVStreamImpl(const OutputVStreamImpl &) = delete;
- OutputVStreamImpl &operator=(OutputVStreamImpl &&) noexcept = default;
- OutputVStreamImpl &operator=(const OutputVStreamImpl &) = delete;
- virtual ~OutputVStreamImpl();
-
- virtual hailo_status read(MemoryView buffer);
-
- void set_on_vstream_cant_read_callback(std::function<void()> callback)
- {
- m_cant_read_callback = callback;
- }
-
- void set_on_vstream_can_read_callback(std::function<void()> callback)
- {
- m_can_read_callback = callback;
- }
-
-private:
- OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
- std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
- EventPtr network_group_activated_event, hailo_status &output_status);
-
- std::function<void()> m_cant_read_callback;
- std::function<void()> m_can_read_callback;
-};
-
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-class InputVStreamClient : public InputVStreamInternal
-{
-public:
- static Expected<std::shared_ptr<InputVStreamClient>> create(uint32_t input_vstream_handle);
- InputVStreamClient(InputVStreamClient &&) noexcept = default;
- InputVStreamClient(const InputVStreamClient &) = delete;
- InputVStreamClient &operator=(InputVStreamClient &&) noexcept = default;
- InputVStreamClient &operator=(const InputVStreamClient &) = delete;
- virtual ~InputVStreamClient();
-
- virtual hailo_status write(const MemoryView &buffer) override;
- virtual hailo_status flush() override;
-
- virtual hailo_status abort() override;
- virtual hailo_status resume() override;
- virtual size_t get_frame_size() const override;
- virtual const hailo_vstream_info_t &get_info() const override;
- virtual const hailo_format_t &get_user_buffer_format() const override;
- virtual std::string name() const override;
- virtual std::string network_name() const override;
- virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const override;
- virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const override;
- virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const override;
- virtual AccumulatorPtr get_pipeline_latency_accumulator() const override;
- virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const override;
-
-private:
- InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format,
- hailo_vstream_info_t &&info);
-
- std::unique_ptr<HailoRtRpcClient> m_client;
- uint32_t m_handle;
- hailo_format_t m_user_buffer_format;
- hailo_vstream_info_t m_info;
-};
-
-class OutputVStreamClient : public OutputVStreamInternal
-{
-public:
- static Expected<std::shared_ptr<OutputVStreamClient>> create(uint32_t outputs_vstream_handle);
- OutputVStreamClient(OutputVStreamClient &&) noexcept = default;
- OutputVStreamClient(const OutputVStreamClient &) = delete;
- OutputVStreamClient &operator=(OutputVStreamClient &&) noexcept = default;
- OutputVStreamClient &operator=(const OutputVStreamClient &) = delete;
- virtual ~OutputVStreamClient();
-
- virtual hailo_status read(MemoryView buffer);
-
- virtual hailo_status abort() override;
- virtual hailo_status resume() override;
- virtual size_t get_frame_size() const override;
- virtual const hailo_vstream_info_t &get_info() const override;
- virtual const hailo_format_t &get_user_buffer_format() const override;
- virtual std::string name() const override;
- virtual std::string network_name() const override;
- virtual const std::map<std::string, AccumulatorPtr> &get_fps_accumulators() const override;
- virtual const std::map<std::string, AccumulatorPtr> &get_latency_accumulators() const override;
- virtual const std::map<std::string, std::vector<AccumulatorPtr>> &get_queue_size_accumulators() const override;
- virtual AccumulatorPtr get_pipeline_latency_accumulator() const override;
- virtual const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const override;
-
-private:
- OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
- hailo_vstream_info_t &&info);
-
- std::unique_ptr<HailoRtRpcClient> m_client;
- uint32_t m_handle;
- hailo_format_t m_user_buffer_format;
- hailo_vstream_info_t m_info;
-};
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-
-class PreInferElement : public FilterElement
-{
-public:
- static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
- hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
- const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~PreInferElement() = default;
-
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
- virtual PipelinePad &next_pad() override;
- virtual std::string description() const override;
-
-protected:
- virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
-
-private:
- std::unique_ptr<InputTransformContext> m_transform_context;
- BufferPoolPtr m_pool;
- std::chrono::milliseconds m_timeout;
-};
-
-class PostInferElement : public FilterElement
-{
-public:
- static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape,
- const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
- const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
- hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
- const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
- const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
- DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- virtual ~PostInferElement() = default;
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual PipelinePad &next_pad() override;
- virtual std::string description() const override;
-
-protected:
- virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
-
-private:
- std::unique_ptr<OutputTransformContext> m_transform_context;
-};
-
-class NmsPostProcessMuxElement : public BaseMuxElement
-{
-public:
- static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(const NetFlowYoloNmsElement &nms_op,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos, hailo_format_t format, hailo_nms_info_t nms_info,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
- hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(const NetFlowYoloNmsElement &nms_op,
- const std::vector<hailo_3d_image_shape_t> &shapes, const std::vector<hailo_format_t> &formats,
- const std::vector<hailo_quant_info_t> &quant_infos, hailo_nms_info_t nms_info, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- NmsPostProcessMuxElement(const net_flow::YOLOv5PostProcessingOp &nms_op, BufferPoolPtr &&pool, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
-
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
-
-protected:
- virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) override;
-
-private:
- net_flow::YOLOv5PostProcessingOp m_nms_op;
- BufferPoolPtr m_pool;
-};
-
-class NmsMuxElement : public BaseMuxElement
-{
-public:
- static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
- hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
- const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
- const hailo_nms_info_t &get_fused_nms_info() const;
-
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
-
-protected:
- virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) override;
-
-private:
- std::vector<hailo_nms_info_t> m_nms_infos;
- hailo_nms_info_t m_fused_nms_info;
- BufferPoolPtr m_pool;
-};
-
-class TransformDemuxElement : public BaseDemuxElement
-{
-public:
- static Expected<std::shared_ptr<TransformDemuxElement>> create(std::shared_ptr<OutputDemuxer> demuxer,
- const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
- hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools, const std::string &name,
- std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
-
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
-
-protected:
- virtual Expected<std::vector<PipelineBuffer>> action(PipelineBuffer &&input) override;
-
-private:
- std::shared_ptr<OutputDemuxer> m_demuxer;
- std::vector<BufferPoolPtr> m_pools;
-};
-
-class HwReadElement : public SourceElement
-{
-public:
- static Expected<std::shared_ptr<HwReadElement>> create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
- size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
- std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> m_transform_context = nullptr);
- HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name, std::chrono::milliseconds timeout,
- DurationCollector &&duration_collector, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
- BufferPoolPtr transform_pool = nullptr, std::unique_ptr<OutputTransformContext> transform_context = nullptr);
- virtual ~HwReadElement() = default;
-
- virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- virtual hailo_status execute_activate() override;
- virtual hailo_status execute_deactivate() override;
- virtual hailo_status execute_post_deactivate() override;
- virtual hailo_status execute_clear() override;
- virtual hailo_status execute_flush() override;
- virtual hailo_status execute_abort() override;
- virtual hailo_status execute_resume() override;
- virtual hailo_status execute_wait_for_finish() override;
- uint32_t get_invalid_frames_count();
- virtual std::string description() const override;
-
-private:
- std::shared_ptr<OutputStream> m_stream;
- BufferPoolPtr m_pool;
- BufferPoolPtr m_transform_pool;
- std::chrono::milliseconds m_timeout;
- EventPtr m_shutdown_event;
- WaitOrShutdown m_activation_wait_or_shutdown;
- std::unique_ptr<OutputTransformContext> m_transform_context;
-};
-
-class HwWriteElement : public SinkElement
-{
-public:
- static Expected<std::shared_ptr<HwWriteElement>> create(std::shared_ptr<InputStream> stream, const std::string &name,
- hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
- std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event);
- virtual ~HwWriteElement() = default;
-
- virtual hailo_status run_push(PipelineBuffer &&buffer) override;
- virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
- virtual hailo_status execute_activate() override;
- virtual hailo_status execute_deactivate() override;
- virtual hailo_status execute_post_deactivate() override;
- virtual hailo_status execute_clear() override;
- virtual hailo_status execute_flush() override;
- virtual hailo_status execute_abort() override;
- virtual hailo_status execute_resume() override;
- virtual hailo_status execute_wait_for_finish() override;
- virtual std::string description() const override;
-
-private:
- std::shared_ptr<InputStream> m_stream;
- EventPtr m_got_flush_event;
-};
-
-class CopyBufferElement : public FilterElement
-{
-public:
- static Expected<std::shared_ptr<CopyBufferElement>> create(const std::string &name, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- CopyBufferElement(const std::string &name, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
- virtual ~CopyBufferElement() = default;
- virtual PipelinePad &next_pad() override;
-
-protected:
- virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
-};
-
-class VStreamsBuilderUtils
-{
-public:
- static Expected<std::vector<InputVStream>> create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &input_vstream_infos,
- const hailo_vstream_params_t &vstreams_params);
- static Expected<std::vector<OutputVStream>> create_outputs(std::shared_ptr<OutputStream> output_stream,
- NameToVStreamParamsMap &vstreams_params_map, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
- static InputVStream create_input(std::shared_ptr<InputVStreamInternal> input_vstream);
- static OutputVStream create_output(std::shared_ptr<OutputVStreamInternal> output_vstream);
- static Expected<std::vector<OutputVStream>> create_output_nms(OutputStreamPtrVector &output_streams,
- hailo_vstream_params_t vstreams_params,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
- static Expected<std::vector<OutputVStream>> create_output_post_process_nms(OutputStreamPtrVector &output_streams,
- hailo_vstream_params_t vstreams_params,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
- const NetFlowYoloNmsElement &nms_op);
- static hailo_status add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
- std::vector<std::shared_ptr<PipelineElement>> &&elements, std::vector<OutputVStream> &vstreams,
- std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
- static hailo_status add_nms_fuse(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
- std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
- static hailo_status add_nms_post_process(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
- std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
- EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
- const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
- const NetFlowYoloNmsElement &nms_op);
- static Expected<AccumulatorPtr> create_pipeline_latency_accumulator(const hailo_vstream_params_t &vstreams_params);
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_VSTREAM_INTERNAL_HPP_ */
service ProtoHailoRtRpc {
rpc client_keep_alive (keepalive_Request) returns (empty) {}
rpc get_service_version (get_service_version_Request) returns (get_service_version_Reply) {}
+
rpc VDevice_create (VDevice_create_Request) returns (VDevice_create_Reply) {}
+ rpc VDevice_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
rpc VDevice_release (Release_Request) returns (Release_Reply) {}
rpc VDevice_configure (VDevice_configure_Request) returns (VDevice_configure_Reply) {}
rpc VDevice_get_physical_devices_ids (VDevice_get_physical_devices_ids_Request) returns (VDevice_get_physical_devices_ids_Reply) {}
rpc VDevice_get_default_streams_interface (VDevice_get_default_streams_interface_Request) returns (VDevice_get_default_streams_interface_Reply) {}
+ rpc ConfiguredNetworkGroup_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
rpc ConfiguredNetworkGroup_release (Release_Request) returns (Release_Reply) {}
rpc ConfiguredNetworkGroup_make_input_vstream_params (ConfiguredNetworkGroup_make_input_vstream_params_Request) returns (ConfiguredNetworkGroup_make_input_vstream_params_Reply) {}
rpc ConfiguredNetworkGroup_make_output_vstream_params (ConfiguredNetworkGroup_make_output_vstream_params_Request) returns (ConfiguredNetworkGroup_make_output_vstream_params_Reply) {}
rpc ConfiguredNetworkGroup_get_input_vstream_infos (ConfiguredNetworkGroup_get_vstream_infos_Request) returns (ConfiguredNetworkGroup_get_vstream_infos_Reply) {}
rpc ConfiguredNetworkGroup_get_output_vstream_infos (ConfiguredNetworkGroup_get_vstream_infos_Request) returns (ConfiguredNetworkGroup_get_vstream_infos_Reply) {}
rpc ConfiguredNetworkGroup_get_all_vstream_infos (ConfiguredNetworkGroup_get_vstream_infos_Request) returns (ConfiguredNetworkGroup_get_vstream_infos_Reply) {}
+ rpc ConfiguredNetworkGroup_is_scheduled (ConfiguredNetworkGroup_is_scheduled_Request) returns (ConfiguredNetworkGroup_is_scheduled_Reply) {}
rpc ConfiguredNetworkGroup_set_scheduler_timeout (ConfiguredNetworkGroup_set_scheduler_timeout_Request) returns (ConfiguredNetworkGroup_set_scheduler_timeout_Reply) {}
rpc ConfiguredNetworkGroup_set_scheduler_threshold (ConfiguredNetworkGroup_set_scheduler_threshold_Request) returns (ConfiguredNetworkGroup_set_scheduler_threshold_Reply) {}
+ rpc ConfiguredNetworkGroup_set_scheduler_priority (ConfiguredNetworkGroup_set_scheduler_priority_Request) returns (ConfiguredNetworkGroup_set_scheduler_priority_Reply) {}
rpc ConfiguredNetworkGroup_get_latency_measurement (ConfiguredNetworkGroup_get_latency_measurement_Request) returns (ConfiguredNetworkGroup_get_latency_measurement_Reply) {}
rpc ConfiguredNetworkGroup_is_multi_context (ConfiguredNetworkGroup_is_multi_context_Request) returns (ConfiguredNetworkGroup_is_multi_context_Reply) {}
rpc ConfiguredNetworkGroup_get_config_params(ConfiguredNetworkGroup_get_config_params_Request) returns (ConfiguredNetworkGroup_get_config_params_Reply) {}
rpc InputVStreams_create (VStream_create_Request) returns (VStreams_create_Reply) {}
+ rpc InputVStream_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
+ rpc OutputVStream_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
rpc InputVStream_release (Release_Request) returns (Release_Reply) {}
rpc OutputVStreams_create (VStream_create_Request) returns (VStreams_create_Reply) {}
rpc OutputVStream_release (Release_Request) returns (Release_Reply) {}
message empty {}
message keepalive_Request {
- uint32 process_id = 1;
+ uint32 pid = 1;
}
message ProtoVDeviceParams {
ProtoHailoVersion hailo_version = 2;
}
+message dup_handle_Request {
+ uint32 pid = 1;
+ uint32 handle = 2;
+}
+
+message dup_handle_Reply {
+ uint32 handle = 1;
+}
+
message VDevice_create_Request {
ProtoVDeviceParams hailo_vdevice_params = 1;
uint32 pid = 2;
message ProtoStreamsParams {
uint32 stream_interface = 1;
uint32 direction = 2;
+ uint32 flags = 3;
}
message ProtoNamedStreamParams {
repeated ProtoVStreamInfo vstream_infos = 2;
}
+message ConfiguredNetworkGroup_is_scheduled_Request {
+ uint32 handle = 1;
+}
+
+message ConfiguredNetworkGroup_is_scheduled_Reply {
+ uint32 status = 1;
+ bool is_scheduled = 2;
+}
+
message ConfiguredNetworkGroup_set_scheduler_timeout_Request {
uint32 handle = 1;
uint32 timeout_ms = 2;
uint32 status = 1;
}
+message ConfiguredNetworkGroup_set_scheduler_priority_Request {
+ uint32 handle = 1;
+ uint32 priority = 2;
+ string network_name = 3;
+}
+
+message ConfiguredNetworkGroup_set_scheduler_priority_Reply {
+ uint32 status = 1;
+}
+
message ConfiguredNetworkGroup_get_latency_measurement_Reply {
uint32 status = 1;
uint32 avg_hw_latency = 2;
namespace hailort
{
+#ifdef _WIN32
+static const std::string HAILORT_SERVICE_DEFAULT_ADDR = "127.0.0.1:50051";
+#else
static const std::string HAILO_UDS_PREFIX = "unix://";
static const std::string HAILO_DEFAULT_SERVICE_ADDR = "/tmp/hailort_uds.sock";
-static const std::string HAILO_DEFAULT_UDS_ADDR = HAILO_UDS_PREFIX + HAILO_DEFAULT_SERVICE_ADDR;
-static const uint32_t HAILO_KEEPALIVE_INTERVAL_SEC = 2;
+static const std::string HAILORT_SERVICE_DEFAULT_ADDR = HAILO_UDS_PREFIX + HAILO_DEFAULT_SERVICE_ADDR;
+#endif
+static const std::chrono::seconds HAILO_KEEPALIVE_INTERVAL(2);
}
@ECHO OFF
set BASE_URI=https://hailo-hailort.s3.eu-west-2.amazonaws.com
-set HRT_VERSION=4.12.1
+set HRT_VERSION=4.13.0
set FW_DIR=Hailo8/%HRT_VERSION%/FW
set FW=hailo8_fw.%HRT_VERSION%_eth.bin
set -e
readonly BASE_URI="https://hailo-hailort.s3.eu-west-2.amazonaws.com"
-readonly HRT_VERSION=4.12.1
+readonly HRT_VERSION=4.13.0
readonly FW_AWS_DIR="Hailo8/${HRT_VERSION}/FW"
readonly FW="hailo8_fw.${HRT_VERSION}_eth.bin"
:: cmd
@ECHO OFF
set BASE_URI=https://hailo-hailort.s3.eu-west-2.amazonaws.com
-set HRT_VERSION=4.12.1
+set HRT_VERSION=4.13.0
set REMOTE_HEF_DIR=Hailo8/%HRT_VERSION%/HEFS
set LOCAL_EXAMPLES_HEF_DIR=..\libhailort\examples\hefs
set LOCAL_TUTORIALS_HEF_DIR=..\libhailort\bindings\python\platform\tutorials\hefs
set -e
readonly BASE_URI="https://hailo-hailort.s3.eu-west-2.amazonaws.com"
-readonly HRT_VERSION=4.12.1
+readonly HRT_VERSION=4.13.0
readonly REMOTE_HEF_DIR="Hailo8/${HRT_VERSION}/HEFS"
readonly LOCAL_EXAMPLES_HEF_DIR="../libhailort/examples/hefs"
readonly LOCAL_TUTORIALS_HEF_DIR="../libhailort/bindings/python/platform/tutorials/hefs/"
readline_wrapper.cpp
driver_memory.cpp
memory_commands.cpp
- mercury_fields.cpp
+ hailo15_fields.cpp
# Depends on hailort_driver and its dependencies
${HAILO_OS_DIR}/hailort_driver.cpp
*/
#include "driver_memory.hpp"
-#include "mercury_fields.hpp"
+#include "hailo15_fields.hpp"
DriverMemorySource::DriverMemorySource(std::shared_ptr<HailoRTDriver> driver, HailoRTDriver::MemoryType memory_type) :
m_driver(driver),
static constexpr size_t VDMA_CHANNELS_COUNT = 32;
+static constexpr size_t VDMA_H2D_CHANNELS_COUNT = 16;
#pragma pack(push, 1)
struct VdmaDataPerDirection {
static_assert(0x10 == sizeof(VdmaDataPerDirection), "Invalid VdmaDataPerDirection size");
struct VdmaChannelData {
- VdmaDataPerDirection h2d;
- VdmaDataPerDirection d2h;
+ VdmaDataPerDirection src;
+ VdmaDataPerDirection dest;
};
#pragma pack(pop)
throw std::runtime_error(fmt::format("Failed reading memory, status {}", status));
}
- return fmt::format("channel[{}] (offset=0x{:X} size=0x{:X}):\n", index, index * sizeof(data), sizeof(data)) +
- fmt::format(" host: {}\n", print_direction(data.h2d)) +
- fmt::format(" device: {}\n", print_direction(data.d2h));
+ return fmt::format("channel[{}] (offset=0x{:X} size=0x{:X} type= {}):\n", index, index * sizeof(data), sizeof(data),
+ index < VDMA_H2D_CHANNELS_COUNT ? "H2D" : "D2H") +
+ fmt::format(" Src status: {}\n", print_src_status(data.src)) +
+ fmt::format(" Dest status: {}\n", print_dest_status(data.dest)) +
+ fmt::format(" Src: {}\n", print_direction(data.src)) +
+ fmt::format(" Dest: {}\n", print_direction(data.dest));
}
private:
+    // Shared decoder for one vDMA direction's status word. The source and
+    // destination variants differ only in the name of the "descriptors
+    // available but not yet processed" state, passed in as 'waiting_str'.
+    static std::string print_status(const VdmaDataPerDirection &data, const char *waiting_str) {
+        // Descriptor counters wrap at 2^depth, so compare them under this mask.
+        auto max_desc_mask = static_cast<uint16_t>((1 << data.depth) - 1);
+        std::string status =
+            data.error ? "CHANNEL ERROR" :
+            !data.start_abort ? "ABORTED" :
+            data.pause_resume ? "PAUSED" :
+            (data.num_ongoing & max_desc_mask) != (data.num_processed & max_desc_mask) ? "DURING TRANSFER" :
+            (data.num_available & max_desc_mask) != (data.num_processed & max_desc_mask) ? waiting_str :
+            "IDLE";
+        return status;
+    }
+
+    // Status of the source (host-to-device) side of the channel.
+    static std::string print_src_status(const VdmaDataPerDirection &data) {
+        return print_status(data, "WAITING TO SEND");
+    }
+
+    // Status of the destination (device-to-host) side of the channel.
+    static std::string print_dest_status(const VdmaDataPerDirection &data) {
+        return print_status(data, "WAITING TO RECEIVE");
+    }
+
static std::string print_direction(const VdmaDataPerDirection &data)
{
return fmt::format(
--- /dev/null
+/**
+ * @file hailo15_fields.cpp
+ * @brief Contains all memory fields related to hailo15
+ */
+
+#include "hailo15_fields.hpp"
+#include "hw_consts/hailo15/dram_dma_engine_config_regs.h"
+
+// Implement our own offsetof to allow access to array
+#define my_offsetof(type,field) ((size_t)(&(((type*)(0))->field)))
+#define dram_dma_offsetof(field) my_offsetof(DRAM_DMA_ENGINE_CONFIG_t, field)
+
+
+static constexpr auto CCB_ADDRESS_SHIFT = 9;
+
+
+QddcField::QddcField() :
+ Field("qddc", "Queue dest device channel (qddc)")
+{}
+
+size_t QddcField::elements_count() const
+{
+ return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
+}
+
+std::string QddcField::print_element(MemorySource& memory, size_t index) const
+{
+ return fmt::format("qddc[{}] enabled={} mode={} shmifo_id={}\n", index,
+ is_enabled(memory, index), mode(memory, index), shmifo_id(memory, index));
+}
+
+bool QddcField::is_enabled(MemorySource &memory, size_t index) const
+{
+ return (1 == memory.read<uint32_t>(dram_dma_offsetof(QddcEnable[index])));
+}
+
+uint32_t QddcField::shmifo_id(MemorySource &memory, size_t index) const
+{
+ return memory.read<uint32_t>(dram_dma_offsetof(QddcShmifoId[index]));
+}
+
+// Decode the per-channel QDDC mode register into a human-readable string.
+// Per the QDDCMODE register description, 0 = CONT_MODE (continuous) and
+// 1 = BURST_MODE; any other value is unexpected and reported verbatim.
+std::string QddcField::mode(MemorySource &memory, size_t index) const
+{
+    const auto mode = memory.read<uint32_t>(dram_dma_offsetof(QddcMode[index]));
+    switch (mode) {
+    case 0: return "CONTINUOUS";
+    case 1: return "BURST";
+    default:
+        return fmt::format("Unknown {}", mode);
+    }
+}
+
+QsdcField::QsdcField() :
+ Field("qsdc", "Queue source device channel (qsdc)")
+{}
+
+size_t QsdcField::elements_count() const
+{
+ return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
+}
+
+std::string QsdcField::print_element(MemorySource& memory, size_t index) const
+{
+ return fmt::format("qsdc[{}] enabled={} shmifo_id={}\n", index,
+ is_enabled(memory, index), shmifo_id(memory, index));
+}
+
+bool QsdcField::is_enabled(MemorySource &memory, size_t index) const
+{
+ return (1 == memory.read<uint32_t>(dram_dma_offsetof(QsdcEnable[index])));
+}
+
+uint32_t QsdcField::shmifo_id(MemorySource &memory, size_t index) const
+{
+ return memory.read<uint32_t>(dram_dma_offsetof(QsdcShmifoId[index]));
+}
+
+QdmcField::QdmcField() :
+ Field("qdmc", "Queue dest memory channel (qdmc)")
+{}
+
+size_t QdmcField::elements_count() const
+{
+ return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
+}
+
+std::string QdmcField::print_element(MemorySource& memory, size_t index) const
+{
+ return fmt::format("qdmc[{}] enabled={} address=0x{:x} desc_count={} desc_per_irq={}\n", index,
+ is_enabled(memory, index), base_address(memory, index), descriptors_count(memory, index),
+ descriptors_per_irq(memory, index));
+}
+
+bool QdmcField::is_enabled(MemorySource &memory, size_t index) const
+{
+ return (1 == memory.read<uint32_t>(dram_dma_offsetof(QdmcEnable[index])));
+}
+
+// Return the CCB base address of a destination memory channel.
+// The register holds the address in 512-byte units (CCB_ADDRESS_SHIFT == 9);
+// the value is widened to 64 bits before shifting so the full address does
+// not overflow a 32-bit intermediate.
+uint64_t QdmcField::base_address(MemorySource &memory, size_t index) const
+{
+    const uint64_t address = memory.read<uint32_t>(dram_dma_offsetof(QdmcMemBaseAddr[index]));
+    return address << CCB_ADDRESS_SHIFT;
+}
+
+// Return the descriptor count of a destination memory channel.
+// Regular channels [0, N_REGULAR_CH) store the count as log2 in
+// QdmcMemCcbSizeLog2; the remaining channels store it directly in
+// QdmcMemCcbSize, indexed relative to N_REGULAR_CH.
+uint32_t QdmcField::descriptors_count(MemorySource &memory, size_t index) const
+{
+    // NOTE(review): was 'index > N_REGULAR_CH', which sent
+    // index == N_REGULAR_CH to the log2 branch (outside the regular-channel
+    // range) and made QdmcMemCcbSize[0] unreachable; '>=' fixes both.
+    if (index >= DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH) {
+        return memory.read<uint32_t>(dram_dma_offsetof(QdmcMemCcbSize[index - DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH]));
+    }
+    else {
+        const auto desc_count_log2 = memory.read<uint32_t>(dram_dma_offsetof(QdmcMemCcbSizeLog2[index]));
+        // The register stores log2 of the count => count = 2^desc_count_log2.
+        return static_cast<uint32_t>(1u << desc_count_log2);
+    }
+}
+
+uint32_t QdmcField::descriptors_per_irq(MemorySource &memory, size_t index) const
+{
+ return memory.read<uint32_t>(dram_dma_offsetof(QdmcDescCsInterrupt[index]));
+}
+
+QsmcField::QsmcField() :
+ Field("qsmc", "Queue source memory channel (qsmc)")
+{}
+
+size_t QsmcField::elements_count() const
+{
+ return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
+}
+
+// One-line human-readable dump of a single QSMC (source memory channel).
+std::string QsmcField::print_element(MemorySource& memory, size_t index) const
+{
+    // Fixed copy-paste from QdmcField::print_element: the label must be
+    // "qsmc", not "qdmc", so the dump attributes the line to the right field.
+    return fmt::format("qsmc[{}] mode={} enabled={} address=0x{:x} desc_count={}\n", index,
+        mode(memory, index), is_enabled(memory, index), base_address(memory, index), descriptors_count(memory, index));
+}
+
+bool QsmcField::is_enabled(MemorySource &memory, size_t index) const
+{
+ return (1 == memory.read<uint32_t>(dram_dma_offsetof(QsmcEnable[index])));
+}
+
+// Return the CCB base address of a source memory channel.
+// The register holds the address in 512-byte units (CCB_ADDRESS_SHIFT == 9);
+// widened to 64 bits before shifting to avoid 32-bit overflow.
+uint64_t QsmcField::base_address(MemorySource &memory, size_t index) const
+{
+    const uint64_t address = memory.read<uint32_t>(dram_dma_offsetof(QsmcMemBaseAddr[index]));
+    return address << CCB_ADDRESS_SHIFT;
+}
+
+// Return the descriptor count of a source memory channel.
+// Unlike QDMC, all QSMC channels store the count directly (not as log2).
+uint32_t QsmcField::descriptors_count(MemorySource &memory, size_t index) const
+{
+    const auto desc_count = memory.read<uint32_t>(dram_dma_offsetof(QsmcMemCcbSize[index]));
+    return desc_count + 1; // The reg contains desc_count-1
+}
+
+// Decode the per-channel QSMC mode register into a human-readable string.
+std::string QsmcField::mode(MemorySource &memory, size_t index) const
+{
+    const auto mode_value = memory.read<uint32_t>(dram_dma_offsetof(QsmcMode[index]));
+    if (0 == mode_value) {
+        return "CONTINUOUS";
+    }
+    if (2 == mode_value) {
+        return "BURST";
+    }
+    if (3 == mode_value) {
+        // C2C mode - report which channel this one is chained from.
+        const auto c2c_sel = memory.read<uint32_t>(dram_dma_offsetof(QsmcC2cSel[index]));
+        return fmt::format("C2C (from {})", c2c_sel);
+    }
+    return fmt::format("Unknown {}", mode_value);
+}
--- /dev/null
+/**
+ * @file hailo15_fields.hpp
+ * @brief Contains all memory fields related to hailo15
+ */
+
+#ifndef _HW_DEBUG_HAILO15_FIELDS_H_
+#define _HW_DEBUG_HAILO15_FIELDS_H_
+
+#include "memory_commands.hpp"
+
+
+class QddcField : public Field {
+public:
+ QddcField();
+
+ virtual size_t elements_count() const override;
+ virtual std::string print_element(MemorySource& memory, size_t index) const override;
+
+private:
+ bool is_enabled(MemorySource &memory, size_t index) const;
+ uint32_t shmifo_id(MemorySource &memory, size_t index) const;
+ std::string mode(MemorySource &memory, size_t index) const;
+};
+
+class QsdcField : public Field {
+public:
+ QsdcField();
+
+ virtual size_t elements_count() const override;
+ virtual std::string print_element(MemorySource& memory, size_t index) const override;
+
+private:
+ bool is_enabled(MemorySource &memory, size_t index) const;
+ uint32_t shmifo_id(MemorySource &memory, size_t index) const;
+};
+
+
+class QdmcField : public Field {
+public:
+ QdmcField();
+
+ virtual size_t elements_count() const override;
+ virtual std::string print_element(MemorySource& memory, size_t index) const override;
+
+private:
+ bool is_enabled(MemorySource &memory, size_t index) const;
+ uint64_t base_address(MemorySource &memory, size_t index) const;
+ uint32_t descriptors_count(MemorySource &memory, size_t index) const;
+ uint32_t descriptors_per_irq(MemorySource &memory, size_t index) const;
+};
+
+class QsmcField : public Field {
+public:
+ QsmcField();
+
+ virtual size_t elements_count() const override;
+ virtual std::string print_element(MemorySource& memory, size_t index) const override;
+
+private:
+ bool is_enabled(MemorySource &memory, size_t index) const;
+ uint64_t base_address(MemorySource &memory, size_t index) const;
+ uint32_t descriptors_count(MemorySource &memory, size_t index) const;
+ std::string mode(MemorySource &memory, size_t index) const;
+};
+
+#endif /* _HW_DEBUG_HAILO15_FIELDS_H_ */
--- /dev/null
+/*-------------------------------------------------------------------------------------
+// Copyright (c) 2022 by Hailotech. This model is the confidential and
+// proprietary property of Hailotech, and the possession or use of this
+// file requires a written license from Hailotech.
+-------------------------------------------------------------------------------------*/
+
+
+
+#include <stdint.h>
+
+#ifndef DRAM_DMA_ENGINE_CONFIG_MACRO_H
+#define DRAM_DMA_ENGINE_CONFIG_MACRO_H
+
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCENABLE : val */
+/* Description: Enable per channel,when disabled do not give credits to vDMA */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCRESET : val */
+/* Description: Soft reset per channel,when write 1'b1 should clear all internal credits/counter/status. Should be set when channel is disabled,usually with vDMA channel reset (abort). Write 1'b0 should do nothing. Read always return 1'b0. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCMODE : val */
+/* Description: 0 - CONT_MODE. 1 - BURST_MODE */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCADDBURSTVAL : val */
+/* Description: Writing to this register increment the remain burst counter in QDDC by QddcAddBurstVal x 8 Bytes: RemainBurstCount += QddcAddBurstVal. Reading this register should return the current available credit counter (RemainBurstCount) in 2s complement format - can be negative. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__WIDTH (27)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__MASK (0x07FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x07FFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x07FFFFFFL) | (((uint32_t)(value) << 0) & 0x07FFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x07FFFFFFL) | 0x07FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x07FFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCMAXDESC : val */
+/* Description: Maximum in flight descriptors,this is a TH for number of descriptors the QM might give the vDMA. 3'd0 - 1 descriptor (debug mode). 3'd1 - N_QM_DESC*1/8 (2). 3'd2 - N_QM_DESC*2/8 (4). 3'd3 - N_QM_DESC*3/8 (6). 3'd4 - N_QM_DESC*2/4 (8). 3'd5 - N_QM_DESC*5/8 (10). 3'd6 - N_QM_DESC*6/8 (12). 3'd7 - N_QM_DESC-1 (15-maximum),default. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__RESET (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCSHMIFOID : val */
+/* Description: The RX-SHMIFO ID. Used to know the SHMIFO base address (from a global parameter/define) and used to select the correct SHMIFO credit signal (nn_core_inbound_buffer_ready_pulse). 0-19: for DSM-RX 0-19. 20-23: for CSM 0-3. 24-30: reserved. 31: NULL ignore any credit from NN Core. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__WIDTH (5)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__MASK (0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__RESET (0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCSHMIFOCREDITSIZE : val */
+/* Description: The credit size in 8B granularity minus 1. 0 - indicates 8B 1 - indicates 16B ... 10'd1023 - indicates 8kB */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__WIDTH (10)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__MASK (0x000003FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000003FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL) | (((uint32_t)(value) << 0) & 0x000003FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL) | 0x000003FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDCSHMIFOINITCREDIT : val */
+/* Description: Writing to this register sets the amount of credit from SHMIFO RX (AvailableCredits); used to configure the initial amount of credits. Reading this register returns the value of AvailableCredits. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__WIDTH (13)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__MASK (0x00001FFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00001FFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00001FFFL) | (((uint32_t)(value) << 0) & 0x00001FFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00001FFFL) | 0x00001FFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00001FFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCENABLE : val */
+/* Description: Enable per channel,when disabled do not give credits to vDMA */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCRESET : val */
+/* Description: Soft reset per channel; writing 1'b1 clears all internal credits/counters/status. Should be asserted while the channel is disabled, usually together with vDMA channel reset (abort). Writing 1'b0 does nothing. Reads always return 1'b0. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCMAXDESC : val */
+/* Description: Maximum in flight descriptors,this is a TH for number of descriptors the QM might give the vDMA. 3'd0 - 1 descriptor (debug mode). 3'd1 - N_QM_DESC*1/8 (2). 3'd2 - N_QM_DESC*2/8 (4). 3'd3 - N_QM_DESC*3/8 (6). 3'd4 - N_QM_DESC*4/8 (8). 3'd5 - N_QM_DESC*5/8 (10). 3'd6 - N_QM_DESC*6/8 (12). 3'd7 - N_QM_DESC-1 (15-maximum),default. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__RESET (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCSHMIFOID : val */
+/* Description: The TX-SHMIFO ID. Used to know the SHMIFO base address (from a global parameter/define) and used to select the correct SHMIFO credit signal (nn_core_outbound_buffer_valid_pulse). 0-19: for DSM-TX 0-19. 20-30: reserved. 31: NULL ignore any credit from NN Core. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__WIDTH (5)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__MASK (0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__RESET (0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCSHMIFOCREDITSIZE : val */
+/* Description: The credit size in 8B granularity minus 1. 0 - indicates 8B 1 - indicates 16B ... 10'd1023 - indicates 8kB */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__WIDTH (10)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__MASK (0x000003FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000003FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL) | (((uint32_t)(value) << 0) & 0x000003FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL) | 0x000003FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000003FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCFULLNUMPATTERNS : val */
+/* Description: Number of patterns per pattern ID minus one. 0 - one pattern,1 - two patterns,...,3 - four patterns. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__WIDTH (2)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__MASK (0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCFULLPATTERNNUMLINES : val */
+/* Description: Number of lines per pattern. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCFULLPATTERNNUMPAGES : val */
+/* Description: Number of pages per line. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCFULLPATTERNPAGESIZE : val */
+/* Description: page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__WIDTH (9)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__MASK (0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCFULLPATTERNRESIDUEPAGESIZE : val */
+/* Description: Residue page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCSIMPPATTERNNUMPAGES : val */
+/* Description: Number of pages per line (simplified pattern has single line/pattern). */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCSIMPPATTERNPAGESIZE : val */
+/* Description: Log2(Page size/512B),valid values are 0 to PAGE_SIZE_MAX-10. 0 - 512B,1 - 1kB,2 - 2kB,3 - 4kB */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__WIDTH (2)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__MASK (0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDCSIMPPATTERNRESIDUEPAGESIZE : val */
+/* Description: Residue page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCENABLE : val */
+/* Description: Enable per channel,when disabled do not give credits to vDMA */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCRESET : val */
+/* Description: Soft reset per channel; writing 1'b1 clears all internal credits/counters/status. Should be asserted while the channel is disabled, usually together with vDMA channel reset (abort). Writing 1'b0 does nothing. Reads always return 1'b0. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCMEMBASEADDR : val */
+/* Description: Base address of the CCB in the DDR memory space, aligned to the minimum page size of 512B. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__WIDTH (26)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__MASK (0x03FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x03FFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | (((uint32_t)(value) << 0) & 0x03FFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | 0x03FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCMEMCCBSIZELOG2 : val */
+/* Description: The CCB size Log2(memory size/512B): 1 - 1kB (2 pages). 2 - 2kB. valid values are 1 to W_CCB_DESC_INDEX */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__WIDTH (5)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__MASK (0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001FL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCDESCCSINTERRUPT : val */
+/* Description: When > 0 the QDMC will interrupt the CS manager every written QdmcDescCsInterrupt descriptors. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCBANKINTERLEAVEMODE : val */
+/* Description: Select the bank interleave mode: 2'd0 - interleave 8 banks (default),2'd1 - Interleave 4 banks,2'd2 - Interleave 2 banks,2'd3 - no interleave. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__WIDTH (2)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__MASK (0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCMODE : val */
+/* Description: 0 - CONT_MODE. 1 - BURST_MODE */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCADDBURSTVAL : val */
+/* Description: Writing to this register increments the available descriptor counter in QDMC by QdmcAddBurstVal descriptors: AvailableDescsCounter += QdmcAddBurstVal. Reading this register returns the current available descriptor counter (AvailableDescsCounter). Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCMEMCCBSIZE : val */
+/* Description: The CCB size Log2(memory size/512B): 1 - 1kB (2 pages). 2 - 2kB. Valid values are 1 to W_CCB_DESC_INDEX. (NOTE(review): this description is identical to QDMCMEMCCBSIZELOG2's, yet the field is 18 bits wide rather than 5 — presumably this register holds the CCB size in descriptors, not its log2; verify against the register specification.) */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCDESCPERIPHINTERRUPT : val */
+/* Description: When > 0 the QDMC will interrupt the peripheral every written QdmcDescPeriphInterrupt descriptors. */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMCCCBPROCESSEDINDEX : val */
+/* Description: Used by the peripheral to indicate how much data is ready (processed) in the CCB. This is the CcbIndex (free pointer in the CCB). */
+#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCENABLE : val */
+/* Description: Enable per channel,when disabled do not give credits to vDMA */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCRESET : val */
+/* Description: Soft reset per channel; writing 1'b1 clears all internal credits/counters/status. Should be asserted while the channel is disabled, usually together with vDMA channel reset (abort). Writing 1'b0 does nothing. Reads always return 1'b0. Implemented as external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCMODE : val */
+/* Description: QSMC mode of operation: 2'd0 - CONT_MODE 2'd1 - reserved. 2'd2 - BURST_MODE 2'd3 - C2C_MODE */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__WIDTH (2)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__MASK (0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCC2CSEL : val */
+/* Description: Selector for the Channel-to-Channel credit input; selects the QDMC channel used as the source of HW available descriptors. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__WIDTH (6)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__MASK (0x0000003FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__RESET (0x0000003FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000003FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000003FL) | (((uint32_t)(value) << 0) & 0x0000003FL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000003FL) | 0x0000003FL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000003FL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCADDBURSTVAL : val */
+/* Description: Writing to this register increments the available-descriptor counter in QSMC by QsmcAddBurstVal descriptors: AvailableDescsCounter += QsmcAddBurstVal. Reading this register returns the current available-descriptor counter (AvailableDescsCounter). Implemented as an external register type. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCMEMBASEADDR : val */
+/* Description: Base address of the CCB in the DDR memory space, aligned to the minimum page size of 512B. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__WIDTH (26)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__MASK (0x03FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x03FFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | (((uint32_t)(value) << 0) & 0x03FFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | 0x03FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x03FFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCMEMCCBSIZE : val */
+/* Description: The CCB size minus one in page size granularity. 0 - 1 desc 1 - 2 desc ... N_CCB_MAX_DESC-1 - N_CCB_MAX_DESC desc. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCPAGESIZE : val */
+/* Description: M2D memory page size. Valid values are: 0 - 512B, 1 - 1KB, 2 - 2KB, 3 - 4KB, 4 - 1536B. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCSIMPPATTERNNUMPAGES : val */
+/* Description: Number of pages per line (simplified pattern has single line/pattern). */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCSIMPPATTERNRESIDUEPAGESIZE : val */
+/* Description: Residue page size in 8B granularity, minus one, per pattern: 0 - 8B, 1 - 16B, ..., 511 - 4KB. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCBANKINTERLEAVEMODE : val */
+/* Description: Select the bank interleave mode: 2'd0 - interleave 8 banks (default), 2'd1 - interleave 4 banks, 2'd2 - interleave 2 banks, 2'd3 - no interleave. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__WIDTH (2)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__MASK (0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000003L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCDESCPERIPHINTERRUPT : val */
+/* Description: When > 0, the QSMC interrupts the peripheral every QsmcDescPeriphInterrupt descriptors read. */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMCCCBFREEINDEX : val */
+/* Description: Used by the peripheral to indicate how much data is ready in the CCB for write (processing). This is the CcbIndex (free pointer into the CCB). */
+#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__WIDTH (18)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__MASK (0x0003FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_CS_INTR_MASK : val */
+/* Description: INT register. Bits[15:0], per M2D channel, indicate one of the following events: a. Internal desc - QSMC processed the last CCB descriptor; implemented by setting the interrupt when the CCB free index wraps (becomes zero). Might be used by the CONF channel to indicate that configuration is done. Bits[31:16], per D2M channel, indicate one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_CS_INTR_STATUS : val */
+/* Description: INT register. Bits[15:0], per M2D channel, indicate one of the following events: a. Internal desc - QSMC processed the last CCB descriptor; implemented by setting the interrupt when the CCB free index wraps (becomes zero). Might be used by the CONF channel to indicate that configuration is done. Bits[31:16], per D2M channel, indicate one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_CS_INTR_W1C : val */
+/* Description: INT register. Bits[15:0], per M2D channel, indicate one of the following events: a. Internal desc - QSMC processed the last CCB descriptor; implemented by setting the interrupt when the CCB free index wraps (becomes zero). Might be used by the CONF channel to indicate that configuration is done. Bits[31:16], per D2M channel, indicate one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_CS_INTR_W1S : val */
+/* Description: INT register. Bits[15:0], per M2D channel, indicate one of the following events: a. Internal desc - QSMC processed the last CCB descriptor; implemented by setting the interrupt when the CCB free index wraps (becomes zero). Might be used by the CONF channel to indicate that configuration is done. Bits[31:16], per D2M channel, indicate one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_AP_INTR_MASK : val */
+/* Description: INT register. One bit per direction/channel, indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_AP_INTR_STATUS : val */
+/* Description: INT register. One bit per direction/channel, indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_AP_INTR_W1C : val */
+/* Description: INT register. One bit per direction/channel, indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_AP_INTR_W1S : val */
+/* Description: INT register. One bit per direction/channel, indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_DSP_INTR_MASK : val */
+/* Description: INT register */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_DSP_INTR_STATUS : val */
+/* Description: INT register */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_DSP_INTR_W1C : val */
+/* Description: INT register */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_DSP_INTR_W1S : val */
+/* Description: INT register */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ERR_INTR_MASK : desc_err */
+/* Description: Summary of desc_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* ENGINE_ERR_INTR_MASK : qddc_crd_ovf_err */
+/* Description: Summary of qddc_crd_ovf_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/* ENGINE_ERR_INTR_MASK : qsdc_crd_ovf_err */
+/* Description: Summary of qsdc_crd_ovf_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__SHIFT (2)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__MASK (0x00000004L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000004L) >> 2)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000004L) | (((uint32_t)(value) << 2) & 0x00000004L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000004L) | ((uint32_t)(1) << 2))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000004L) | ((uint32_t)(0) << 2))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ERR_INTR_STATUS : desc_err */
+/* Description: Summary of desc_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+
+/* ENGINE_ERR_INTR_STATUS : qddc_crd_ovf_err */
+/* Description: Summary of qddc_crd_ovf_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+
+/* ENGINE_ERR_INTR_STATUS : qsdc_crd_ovf_err */
+/* Description: Summary of qsdc_crd_ovf_err_intr register. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__SHIFT (2)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__MASK (0x00000004L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000004L) >> 2)
+
+/*
+ * DESC_ERR_INTR_* register family.
+ * MASK exposes full read/write accessors; STATUS is read-only (READ accessor only).
+ * W1C/W1S are conventionally write-1-to-clear / write-1-to-set views of the same
+ * interrupt bits — confirm against the hardware spec; only the DescStatus field is
+ * exposed on the W1C/W1S views in this chunk.
+ * Field layout (MASK/STATUS): DescStatus [7:0], RemainPageSize [8],
+ * SrcDescWdataPar [9], DstDescWdataPar [10].
+ */
+/*----------------------------------------------------------------------------------------------------*/
+/* DESC_ERR_INTR_MASK : DescStatus */
+/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/* DESC_ERR_INTR_MASK : RemainPageSize */
+/* Description: non-zero REMAINING_PAGE_SIZE. Refer to EngErrInterruptSource register for the error origin. Refer to EngErrRemainPageSize register for the returned value. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__SHIFT (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__MASK (0x00000100L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | (((uint32_t)(value) << 8) & 0x00000100L))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(1) << 8))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(0) << 8))
+
+/* DESC_ERR_INTR_MASK : SrcDescWdataPar */
+/* NOTE(review): the field name suggests a write-data parity error, but the description
+ * below reads like a copy of the descriptor-completion error text (same for
+ * DstDescWdataPar) — confirm against the hardware spec before relying on it. */
+/* Description: Source descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__SHIFT (9)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__MASK (0x00000200L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000200L) >> 9)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000200L) | (((uint32_t)(value) << 9) & 0x00000200L))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000200L) | ((uint32_t)(1) << 9))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000200L) | ((uint32_t)(0) << 9))
+
+/* DESC_ERR_INTR_MASK : DstDescWdataPar */
+/* Description: Destination descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__SHIFT (10)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__MASK (0x00000400L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000400L) >> 10)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000400L) | (((uint32_t)(value) << 10) & 0x00000400L))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000400L) | ((uint32_t)(1) << 10))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000400L) | ((uint32_t)(0) << 10))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DESC_ERR_INTR_STATUS : DescStatus */
+/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/* DESC_ERR_INTR_STATUS : RemainPageSize */
+/* Description: non-zero REMAINING_PAGE_SIZE. Refer to EngErrInterruptSource register for the error origin. Refer to EngErrRemainPageSize register for the returned value. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__SHIFT (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__MASK (0x00000100L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
+
+/* DESC_ERR_INTR_STATUS : SrcDescWdataPar */
+/* Description: Source descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__SHIFT (9)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__MASK (0x00000200L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000200L) >> 9)
+
+/* DESC_ERR_INTR_STATUS : DstDescWdataPar */
+/* Description: Destination descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__SHIFT (10)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__MASK (0x00000400L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000400L) >> 10)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DESC_ERR_INTR_W1C : DescStatus */
+/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DESC_ERR_INTR_W1S : DescStatus */
+/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*
+ * QDDC/QSDC_CRD_OVF_ERR_INTR_* register family: one interrupt bit per channel in
+ * bits [15:0] (16 channels), flagging overflow/underflow of the Core credit counter.
+ * MASK exposes full read/write accessors; STATUS is read-only; W1C/W1S are
+ * conventionally write-1-to-clear / write-1-to-set views — confirm against the spec.
+ */
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_CRD_OVF_ERR_INTR_MASK : ch */
+/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_CRD_OVF_ERR_INTR_STATUS : ch */
+/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_CRD_OVF_ERR_INTR_W1C : ch */
+/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_CRD_OVF_ERR_INTR_W1S : ch */
+/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_CRD_OVF_ERR_INTR_MASK : ch */
+/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_CRD_OVF_ERR_INTR_STATUS : ch */
+/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_CRD_OVF_ERR_INTR_W1C : ch */
+/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_CRD_OVF_ERR_INTR_W1S : ch */
+/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*
+ * Engine-error diagnostic registers. All fields below expose only READ accessors
+ * (read-only from the macro set's point of view).
+ */
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGERRINTERRUPTSOURCE : ChannelID */
+/* No generated description; presumably the index of the channel that raised the
+ * engine error (4 bits) — TODO confirm against the hardware spec. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__MASK (0x0000000FL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
+
+/* ENGERRINTERRUPTSOURCE : Direction */
+/* Description: 0 - Destination. 1 - Source. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__SHIFT (4)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__MASK (0x00000010L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000010L) >> 4)
+
+/* ENGERRINTERRUPTSOURCE : Domain */
+/* Description: 0 - Device. 1 - Memory. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__SHIFT (5)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__MASK (0x00000020L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000020L) >> 5)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGERRREMAINPAGESIZE : val */
+/* Description: In case of non-zero REMAINING_PAGE_SIZE this register holds the latched value until cleared by writing to this register */
+/* NOTE(review): the description says "cleared by writing", yet only a READ accessor is
+ * generated — a write to the register address would be done directly, not via a macro. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__WIDTH (24)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__MASK (0x00FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00FFFFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGTRANSFERPAGESIZE : size */
+/* Description: TRANSFERRED_PAGE_SIZE value of last descriptor write to QDMC */
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__WIDTH (24)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__MASK (0x00FFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00FFFFFFL) >> 0)
+
+/* ENGTRANSFERPAGESIZE : ch_id */
+/* Description: QDMC Channel ID */
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__SHIFT (24)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__MASK (0x0F000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0F000000L) >> 24)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* VDMASOFTRESET : val */
+/* Description: Apply soft reset to vDMA. Must be cleared in order to release vDMA from soft reset. */
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* VDMASOFTRESET : par */
+/* No generated description; bit 31, presumably a parity/protection bit covering this
+ * register — TODO confirm against the hardware spec. */
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__SHIFT (31)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__MASK (0x80000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x80000000L) >> 31)
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | (((uint32_t)(value) << 31) & 0x80000000L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(1) << 31))
+#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(0) << 31))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* VDMA_SHAREDBUS : cs_mask */
+/* Description: Bit mask on vDMA Sharedbus interrupt source for CS */
+/* Note: unlike most fields in this file, cs_mask/ap_mask have non-zero reset values
+ * (0xA and 0x50 respectively, per the RESET macros below). */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__MASK (0x0000000FL)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__RESET (0x0000000AL)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL) | (((uint32_t)(value) << 0) & 0x0000000FL))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL) | 0x0000000FL)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL))
+
+/* VDMA_SHAREDBUS : ap_mask */
+/* Description: Bit mask on vDMA Sharedbus interrupt source for AP */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__SHIFT (4)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__MASK (0x000000F0L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__RESET (0x00000050L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000F0L) >> 4)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000F0L) | (((uint32_t)(value) << 4) & 0x000000F0L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000F0L) | 0x000000F0L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000F0L))
+
+/*
+ * CFG_{QDDC,QSDC,QDMC,QSMC}_REDUNDANT_EN: four identically laid-out registers, one per
+ * queue-manager type. Bit i pairs QM[i*2+1] as a redundant copy of QM[i*2] (8 pairs).
+ */
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_QDDC_REDUNDANT_EN : val */
+/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_QSDC_REDUNDANT_EN : val */
+/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_QDMC_REDUNDANT_EN : val */
+/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_QSMC_REDUNDANT_EN : val */
+/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*
+ * QDDC_REDUNDANT_ASF_INT_* register family: per-pair (bits [7:0]) redundancy-compare
+ * mismatch interrupts. MASK exposes full read/write accessors; STATUS is read-only;
+ * W1C/W1S are conventionally write-1-to-clear / write-1-to-set views — confirm
+ * against the hardware spec.
+ */
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_REDUNDANT_ASF_INT_MASK : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_REDUNDANT_ASF_INT_STATUS : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_REDUNDANT_ASF_INT_W1C : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_REDUNDANT_ASF_INT_W1S : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_REDUNDANT_ASF_INT_MASK : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_REDUNDANT_ASF_INT_STATUS : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_REDUNDANT_ASF_INT_W1C : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_REDUNDANT_ASF_INT_W1S : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMC_REDUNDANT_ASF_INT_MASK : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMC_REDUNDANT_ASF_INT_STATUS : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMC_REDUNDANT_ASF_INT_W1C : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMC_REDUNDANT_ASF_INT_W1S : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMC_REDUNDANT_ASF_INT_MASK : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMC_REDUNDANT_ASF_INT_STATUS : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMC_REDUNDANT_ASF_INT_W1C : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMC_REDUNDANT_ASF_INT_W1S : val */
+/* Description: Redundancy mode compare mismatch for QM pair i */
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000000FFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* PRIOISLP : val */
+/* Description: Indicates that the channel priority is low. */
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* READLPTOQOSVALUE : val */
+/* Description: The QOS toward DDR-AXI master for low priority read. */
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* READHPTOQOSVALUE : val */
+/* Description: The QOS toward DDR-AXI master for high priority read. */
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__RESET (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* WRITELPTOQOSVALUE : val */
+/* Description: The QOS toward DDR-AXI master for low priority write. */
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* WRITEHPTOQOSVALUE : val */
+/* Description: The QOS toward DDR-AXI master for high priority write. */
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__RESET (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DESCREADQOSVALUE : val */
+/* Description: The QOS toward DDR-desc-AXI master for read. */
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__RESET (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DESCWRITEQOSVALUE : val */
+/* Description: The QOS toward DDR-desc-AXI master for write. */
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__MASK (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__RESET (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000007L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* VDMA_ARB : prio_en */
+/* Description: Enable 2 level priority based channel arbitration in vDMA */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__RESET (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* VDMA_ARB : interleave_en */
+/* Description: Enable arbitration order to interleave between M2D and D2M channels */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__RESET (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/* VDMA_ARB : par */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__SHIFT (31)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__MASK (0x80000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x80000000L) >> 31)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | (((uint32_t)(value) << 31) & 0x80000000L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(1) << 31))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(0) << 31))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QM_CFG_CG_DELAY : val */
+/* Description: Clock cycles to keep clock running after enable condition is met */
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__MASK (0x0000000FL)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__RESET (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL) | (((uint32_t)(value) << 0) & 0x0000000FL))
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL) | 0x0000000FL)
+#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000000FL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDDC_CFG_CG_BYPASS : val */
+/* Description: Bypass QDDC CG */
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSDC_CFG_CG_BYPASS : val */
+/* Description: Bypass QSDC CG */
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QDMC_CFG_CG_BYPASS : val */
+/* Description: Bypass QDMC CG */
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* QSMC_CFG_CG_BYPASS : val */
+/* Description: Bypass QSMC CG */
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ASF_INT_MASK : parity_error_in_regfile */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ASF_INT_STATUS : parity_error_in_regfile */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ASF_INT_W1C : parity_error_in_regfile */
+/* Bit-0 field accessors; W1C in the register name suggests write-1-to-clear semantics in HW — confirm against the register spec. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_ASF_INT_W1S : parity_error_in_regfile */
+/* Bit-0 field accessors; W1S in the register name suggests write-1-to-set semantics in HW — confirm against the register spec. */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* ENGINE_RW_PARITY_BIST_MODE : val */
+/* Description: write 1 if want to work in rw_parity bist mode in which the parity bit is written by APB wdata and not from HW calculation */
+/* Single-bit field at bit 0; resets to 0 (BIST mode off). */
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* VDMA_STOP_LP : dis */
+/* Description: Write 1 if want to disable LP Stop feature */
+/* Bit 0; resets to 0 (feature not disabled). */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* VDMA_STOP_LP : force_val */
+/* Description: Force Stop LP state when feature is enabled */
+/* Bit 1; resets to 0. */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* VDMA_SCH : stop_th */
+/* Description: Stop scheduling for this many cycles after each successful allocation */
+/* 7-bit field at bits [6:0]; resets to 7. NOTE: for multi-bit fields, SET writes all-ones into the
+ * field (not the value 1) and CLR zeroes it — use MODIFY to write a specific threshold. */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__WIDTH (7)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__MASK (0x0000007FL)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__RESET (0x00000007L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000007FL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000007FL) | (((uint32_t)(value) << 0) & 0x0000007FL))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000007FL) | 0x0000007FL)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000007FL))
+
+/* VDMA_SCH : stop_en */
+/* Description: Enable periodic scheduling stopping mechanism */
+/* Bit 7; resets to 1 (mechanism enabled out of reset — RESET value is 0x80, i.e. bit 7 set). */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__SHIFT (7)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__MASK (0x00000080L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__RESET (0x00000080L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000080L) >> 7)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000080L) | (((uint32_t)(value) << 7) & 0x00000080L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000080L) | ((uint32_t)(1) << 7))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000080L) | ((uint32_t)(0) << 7))
+
+/* VDMA_SCH : tsf24_mode */
+/* Description: Apply fix to increase maximum transfers to 24 */
+/* Bit 8; resets to 0 (fix not applied). */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__SHIFT (8)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__MASK (0x00000100L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | (((uint32_t)(value) << 8) & 0x00000100L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(1) << 8))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(0) << 8))
+
+/* VDMA_SCH : tsf_af_threshold */
+/* Description: Almost Full at 13 allocated TSF (12+8=20). In tsf24_mode should be set to 12. */
+/* NOTE(review): the generated description above is internally inconsistent (13 vs "12+8=20").
+ * The RESET value 0x2800 corresponds to a field value of 20 (0x2800 >> 9). Verify intent against the HW spec. */
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__SHIFT (9)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__WIDTH (5)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__MASK (0x00003E00L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__RESET (0x00002800L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00003E00L) >> 9)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00003E00L) | (((uint32_t)(value) << 9) & 0x00003E00L))
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00003E00L) | 0x00003E00L)
+#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00003E00L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_SRC_DESC_TRACE : en */
+/* Description: Enable tracing of descriptors read from Source QMs */
+/* Bit 0; resets to 0 (tracing disabled). */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* CFG_SRC_DESC_TRACE : stop_on_wrap */
+/* Description: Stop when reaching end of tracing buffer */
+/* Bit 1; resets to 0 (wrap and keep tracing). */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/* CFG_SRC_DESC_TRACE : mprot */
+/* Description: AWPROT value */
+/* 3-bit field at bits [4:2]. SET writes all-ones (AWPROT=0b111); use MODIFY for a specific protection value. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__SHIFT (2)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__MASK (0x0000001CL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000001CL) >> 2)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL) | (((uint32_t)(value) << 2) & 0x0000001CL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL) | 0x0000001CL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL))
+
+/* CFG_SRC_DESC_TRACE : mcache */
+/* Description: AWCACHE value */
+/* 4-bit field at bits [8:5]; resets to 0x020, i.e. field value 1. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__SHIFT (5)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__MASK (0x000001E0L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__RESET (0x00000020L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001E0L) >> 5)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L) | (((uint32_t)(value) << 5) & 0x000001E0L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L) | 0x000001E0L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L))
+
+/* CFG_SRC_DESC_TRACE : buff_size_m1 */
+/* Description: Buffer size minus 1 in 16B descriptors */
+/* 16-bit field at bits [31:16]. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__SHIFT (16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__MASK (0xFFFF0000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFF0000L) >> 16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | (((uint32_t)(value) << 16) & 0xFFFF0000L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | 0xFFFF0000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_SRC_DESC_TRACE_BASE_ADDR : base_addr */
+/* Description: Buffer base address bits 34:4 aligned to 16B */
+/* 31-bit field at bits [30:0]; the stored value is the byte address shifted right by 4 (16B aligned). */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__WIDTH (31)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__MASK (0x7FFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x7FFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | (((uint32_t)(value) << 0) & 0x7FFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | 0x7FFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_DST_DESC_TRACE : en */
+/* Description: Enable tracing of descriptors read from Destination QMs
+ * (NOTE(review): generated text said "Source QMs" — presumed copy-paste from CFG_SRC_DESC_TRACE; verify) */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* CFG_DST_DESC_TRACE : stop_on_wrap */
+/* Description: Stop when reaching end of tracing buffer */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/* CFG_DST_DESC_TRACE : mprot */
+/* Description: AWPROT value */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__SHIFT (2)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__WIDTH (3)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__MASK (0x0000001CL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x0000001CL) >> 2)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL) | (((uint32_t)(value) << 2) & 0x0000001CL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL) | 0x0000001CL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x0000001CL))
+
+/* CFG_DST_DESC_TRACE : mcache */
+/* Description: AWCACHE value. MER-3804 ECO: Note that bit 3 is double booked for the timeout ExtRef
+ * default value, which needs to be 1 in case debug tracing is enabled. */
+/* Resets to 0x120, i.e. field value 0b1001 — unlike the SRC variant, whose mcache resets to 1. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__SHIFT (5)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__WIDTH (4)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__MASK (0x000001E0L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__RESET (0x00000120L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x000001E0L) >> 5)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L) | (((uint32_t)(value) << 5) & 0x000001E0L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L) | 0x000001E0L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x000001E0L))
+
+/* CFG_DST_DESC_TRACE : buff_size_m1 */
+/* Description: Buffer size minus 1 in 16B descriptors */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__SHIFT (16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__WIDTH (16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__MASK (0xFFFF0000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFF0000L) >> 16)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | (((uint32_t)(value) << 16) & 0xFFFF0000L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | 0xFFFF0000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0xFFFF0000L))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_DST_DESC_TRACE_BASE_ADDR : base_addr */
+/* Description: Buffer base address bits 34:4 aligned to 16B. MER-3804 ECO: Note that bits 17:16 are
+ * double booked for the timeout ExtRef mux. In case debug tracing and ExtRef are both required to be
+ * turned on, this constrains base address bits 17:16 to be the same as the timestamp mux. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__WIDTH (31)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__MASK (0x7FFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x7FFFFFFFL) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | (((uint32_t)(value) << 0) & 0x7FFFFFFFL))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | 0x7FFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* CFG_DEBUG_TIMESTAMP : en */
+/* Description: Write 1 to enable timestamp counter for debug logic */
+/* Bit 0; resets to 0 (counter disabled). */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+/* CFG_DEBUG_TIMESTAMP : clr */
+/* Description: Write 1 to clear timestamp counter. After writing 1 to this field need to write 0 immediately */
+/* Bit 1; not self-clearing per the description above — software must write 0 back. */
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__SHIFT (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__MASK (0x00000002L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
+#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
+
+/*----------------------------------------------------------------------------------------------------*/
+/* DEBUG_TIMESTAMP : val */
+/* Full 32-bit timestamp value; only a READ accessor is generated (no MODIFY/SET/CLR) — read-only from SW. */
+#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__WIDTH (32)
+#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__MASK (0xFFFFFFFFL)
+#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__RESET (0x00000000L)
+#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
+
+/*----------------------------------------------------------------------------------------------------*/
+/* AUTO_ADDRESS_ERR_CB_INDICATION : enable */
+/* Description: default is 1, meaning the address error is enabled, to hide the address error indication, set to 0 */
+/* Bit 0; note RESET is 1, unlike most fields in this file — the indication is on out of reset. */
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__SHIFT (0)
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__WIDTH (1)
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__MASK (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__RESET (0x00000001L)
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__READ(reg_offset) \
+ (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__MODIFY(reg_offset, value) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__SET(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
+#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__CLR(reg_offset) \
+ (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
+
+
+#endif /* DRAM_DMA_ENGINE_CONFIG_MACRO_H */
--- /dev/null
+/*-------------------------------------------------------------------------------------
+// Copyright (c) 2022 by Hailotech. This model is the confidential and
+// proprietary property of Hailotech and the possession or use of this
+// file requires a written license from Hailotech.
+-------------------------------------------------------------------------------------*/
+
+
+
+#include <stdint.h>
+
+#ifndef DRAM_DMA_ENGINE_CONFIG_REGS_H
+#define DRAM_DMA_ENGINE_CONFIG_REGS_H
+
+#include "dram_dma_package_macros.h"
+#include "dram_dma_engine_config_macros.h"
+
+typedef struct DRAM_DMA_ENGINE_CONFIG_regs_s {
+ volatile uint32_t QddcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x0 ; repeat: [16] */
+ volatile uint32_t QddcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x40 ; repeat: [16] */
+ volatile uint32_t QddcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x80 ; repeat: [16] */
+ volatile uint32_t QddcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0xc0 ; repeat: [16] */
+ volatile uint32_t QddcMaxDesc[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x100 ; repeat: [16] */
+ volatile uint32_t QddcShmifoId[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x140 ; repeat: [16] */
+ volatile uint32_t QddcShmifoCreditSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x180 ; repeat: [16] */
+ volatile uint32_t QddcShmifoInitCredit[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x1c0 ; repeat: [16] */
+ volatile uint32_t QsdcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x200 ; repeat: [16] */
+ volatile uint32_t QsdcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x240 ; repeat: [16] */
+ volatile uint32_t QsdcMaxDesc[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x280 ; repeat: [16] */
+ volatile uint32_t QsdcShmifoId[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x2c0 ; repeat: [16] */
+ volatile uint32_t QsdcShmifoCreditSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x300 ; repeat: [16] */
+ volatile uint32_t QsdcFullNumPatterns[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN]; /* offset: 0x340 ; repeat: [4] */
+ volatile uint32_t QsdcFullPatternNumLines[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x350 ; repeat: [4, 4] */
+ volatile uint32_t QsdcFullPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x390 ; repeat: [4, 4] */
+ volatile uint32_t QsdcFullPatternPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x3d0 ; repeat: [4, 4] */
+ volatile uint32_t QsdcFullPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x410 ; repeat: [4, 4] */
+ volatile uint32_t QsdcSimpPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x450 ; repeat: [12] */
+ volatile uint32_t QsdcSimpPatternPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x480 ; repeat: [12] */
+ volatile uint32_t QsdcSimpPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x4b0 ; repeat: [12] */
+ volatile uint32_t QdmcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x4e0 ; repeat: [16] */
+ volatile uint32_t QdmcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x520 ; repeat: [16] */
+ volatile uint32_t QdmcMemBaseAddr[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x560 ; repeat: [16] */
+ volatile uint32_t QdmcMemCcbSizeLog2[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH]; /* offset: 0x5a0 ; repeat: [12] */
+ volatile uint32_t QdmcDescCsInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x5d0 ; repeat: [16] */
+ volatile uint32_t QdmcBankInterleaveMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x610 ; repeat: [16] */
+ volatile uint32_t QdmcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x650 ; repeat: [4] */
+ volatile uint32_t QdmcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x660 ; repeat: [4] */
+ volatile uint32_t QdmcMemCcbSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x670 ; repeat: [4] */
+ volatile uint32_t QdmcDescPeriphInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x680 ; repeat: [4] */
+ volatile uint32_t QdmcCcbProcessedIndex[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x690 ; repeat: [4] */
+ volatile uint32_t QsmcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x6a0 ; repeat: [16] */
+ volatile uint32_t QsmcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x6e0 ; repeat: [16] */
+ volatile uint32_t QsmcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x720 ; repeat: [16] */
+ volatile uint32_t QsmcC2cSel[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x760 ; repeat: [16] */
+ volatile uint32_t QsmcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x7a0 ; repeat: [16] */
+ volatile uint32_t QsmcMemBaseAddr[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x7e0 ; repeat: [16] */
+ volatile uint32_t QsmcMemCcbSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x820 ; repeat: [16] */
+ volatile uint32_t QsmcPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x860 ; repeat: [16] */
+ volatile uint32_t QsmcSimpPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x8a0 ; repeat: [16] */
+ volatile uint32_t QsmcSimpPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x8e0 ; repeat: [16] */
+ volatile uint32_t QsmcBankInterleaveMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x920 ; repeat: [16] */
+ volatile uint32_t QsmcDescPeriphInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x960 ; repeat: [4] */
+ volatile uint32_t QsmcCcbFreeIndex[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x970 ; repeat: [4] */
+ volatile uint32_t engine_cs_intr_mask; /* offset: 0x980 ; repeat: [1] */
+ volatile uint32_t engine_cs_intr_status; /* offset: 0x984 ; repeat: [1] */
+ volatile uint32_t engine_cs_intr_w1c; /* offset: 0x988 ; repeat: [1] */
+ volatile uint32_t engine_cs_intr_w1s; /* offset: 0x98c ; repeat: [1] */
+ volatile uint32_t engine_ap_intr_mask; /* offset: 0x990 ; repeat: [1] */
+ volatile uint32_t engine_ap_intr_status; /* offset: 0x994 ; repeat: [1] */
+ volatile uint32_t engine_ap_intr_w1c; /* offset: 0x998 ; repeat: [1] */
+ volatile uint32_t engine_ap_intr_w1s; /* offset: 0x99c ; repeat: [1] */
+ volatile uint32_t engine_dsp_intr_mask; /* offset: 0x9a0 ; repeat: [1] */
+ volatile uint32_t engine_dsp_intr_status; /* offset: 0x9a4 ; repeat: [1] */
+ volatile uint32_t engine_dsp_intr_w1c; /* offset: 0x9a8 ; repeat: [1] */
+ volatile uint32_t engine_dsp_intr_w1s; /* offset: 0x9ac ; repeat: [1] */
+ volatile uint32_t engine_err_intr_mask; /* offset: 0x9b0 ; repeat: [1] */
+ volatile uint32_t engine_err_intr_status; /* offset: 0x9b4 ; repeat: [1] */
+ volatile uint32_t desc_err_intr_mask; /* offset: 0x9b8 ; repeat: [1] */
+ volatile uint32_t desc_err_intr_status; /* offset: 0x9bc ; repeat: [1] */
+ volatile uint32_t desc_err_intr_w1c; /* offset: 0x9c0 ; repeat: [1] */
+ volatile uint32_t desc_err_intr_w1s; /* offset: 0x9c4 ; repeat: [1] */
+ volatile uint32_t qddc_crd_ovf_err_intr_mask; /* offset: 0x9c8 ; repeat: [1] */
+ volatile uint32_t qddc_crd_ovf_err_intr_status; /* offset: 0x9cc ; repeat: [1] */
+ volatile uint32_t qddc_crd_ovf_err_intr_w1c; /* offset: 0x9d0 ; repeat: [1] */
+ volatile uint32_t qddc_crd_ovf_err_intr_w1s; /* offset: 0x9d4 ; repeat: [1] */
+ volatile uint32_t qsdc_crd_ovf_err_intr_mask; /* offset: 0x9d8 ; repeat: [1] */
+ volatile uint32_t qsdc_crd_ovf_err_intr_status; /* offset: 0x9dc ; repeat: [1] */
+ volatile uint32_t qsdc_crd_ovf_err_intr_w1c; /* offset: 0x9e0 ; repeat: [1] */
+ volatile uint32_t qsdc_crd_ovf_err_intr_w1s; /* offset: 0x9e4 ; repeat: [1] */
+ volatile uint32_t EngErrInterruptSource; /* offset: 0x9e8 ; repeat: [1] */
+ volatile uint32_t EngErrRemainPageSize; /* offset: 0x9ec ; repeat: [1] */
+ volatile uint32_t EngTransferPageSize; /* offset: 0x9f0 ; repeat: [1] */
+ volatile uint32_t VdmaSoftReset; /* offset: 0x9f4 ; repeat: [1] */
+ volatile uint32_t vdma_sharedbus; /* offset: 0x9f8 ; repeat: [1] */
+ volatile uint32_t cfg_qddc_redundant_en; /* offset: 0x9fc ; repeat: [1] */
+ volatile uint32_t cfg_qsdc_redundant_en; /* offset: 0xa00 ; repeat: [1] */
+ volatile uint32_t cfg_qdmc_redundant_en; /* offset: 0xa04 ; repeat: [1] */
+ volatile uint32_t cfg_qsmc_redundant_en; /* offset: 0xa08 ; repeat: [1] */
+ volatile uint32_t qddc_redundant_asf_int_mask; /* offset: 0xa0c ; repeat: [1] */
+ volatile uint32_t qddc_redundant_asf_int_status; /* offset: 0xa10 ; repeat: [1] */
+ volatile uint32_t qddc_redundant_asf_int_w1c; /* offset: 0xa14 ; repeat: [1] */
+ volatile uint32_t qddc_redundant_asf_int_w1s; /* offset: 0xa18 ; repeat: [1] */
+ volatile uint32_t qsdc_redundant_asf_int_mask; /* offset: 0xa1c ; repeat: [1] */
+ volatile uint32_t qsdc_redundant_asf_int_status; /* offset: 0xa20 ; repeat: [1] */
+ volatile uint32_t qsdc_redundant_asf_int_w1c; /* offset: 0xa24 ; repeat: [1] */
+ volatile uint32_t qsdc_redundant_asf_int_w1s; /* offset: 0xa28 ; repeat: [1] */
+ volatile uint32_t qdmc_redundant_asf_int_mask; /* offset: 0xa2c ; repeat: [1] */
+ volatile uint32_t qdmc_redundant_asf_int_status; /* offset: 0xa30 ; repeat: [1] */
+ volatile uint32_t qdmc_redundant_asf_int_w1c; /* offset: 0xa34 ; repeat: [1] */
+ volatile uint32_t qdmc_redundant_asf_int_w1s; /* offset: 0xa38 ; repeat: [1] */
+ volatile uint32_t qsmc_redundant_asf_int_mask; /* offset: 0xa3c ; repeat: [1] */
+ volatile uint32_t qsmc_redundant_asf_int_status; /* offset: 0xa40 ; repeat: [1] */
+ volatile uint32_t qsmc_redundant_asf_int_w1c; /* offset: 0xa44 ; repeat: [1] */
+ volatile uint32_t qsmc_redundant_asf_int_w1s; /* offset: 0xa48 ; repeat: [1] */
+ volatile uint32_t PrioIsLp; /* offset: 0xa4c ; repeat: [1] */
+ volatile uint32_t ReadLpToQosValue; /* offset: 0xa50 ; repeat: [1] */
+ volatile uint32_t ReadHpToQosValue; /* offset: 0xa54 ; repeat: [1] */
+ volatile uint32_t WriteLpToQosValue; /* offset: 0xa58 ; repeat: [1] */
+ volatile uint32_t WriteHpToQosValue; /* offset: 0xa5c ; repeat: [1] */
+ volatile uint32_t DescReadQosValue; /* offset: 0xa60 ; repeat: [1] */
+ volatile uint32_t DescWriteQosValue; /* offset: 0xa64 ; repeat: [1] */
+ volatile uint32_t vdma_arb; /* offset: 0xa68 ; repeat: [1] */
+ volatile uint32_t qm_cfg_cg_delay; /* offset: 0xa6c ; repeat: [1] */
+ volatile uint32_t qddc_cfg_cg_bypass; /* offset: 0xa70 ; repeat: [1] */
+ volatile uint32_t qsdc_cfg_cg_bypass; /* offset: 0xa74 ; repeat: [1] */
+ volatile uint32_t qdmc_cfg_cg_bypass; /* offset: 0xa78 ; repeat: [1] */
+ volatile uint32_t qsmc_cfg_cg_bypass; /* offset: 0xa7c ; repeat: [1] */
+ volatile uint32_t engine_asf_int_mask; /* offset: 0xa80 ; repeat: [1] */
+ volatile uint32_t engine_asf_int_status; /* offset: 0xa84 ; repeat: [1] */
+ volatile uint32_t engine_asf_int_w1c; /* offset: 0xa88 ; repeat: [1] */
+ volatile uint32_t engine_asf_int_w1s; /* offset: 0xa8c ; repeat: [1] */
+ volatile uint32_t engine_rw_parity_bist_mode; /* offset: 0xa90 ; repeat: [1] */
+ volatile uint32_t vdma_stop_lp; /* offset: 0xa94 ; repeat: [1] */
+ volatile uint32_t vdma_sch; /* offset: 0xa98 ; repeat: [1] */
+ volatile uint32_t cfg_src_desc_trace; /* offset: 0xa9c ; repeat: [1] */
+ volatile uint32_t cfg_src_desc_trace_base_addr; /* offset: 0xaa0 ; repeat: [1] */
+ volatile uint32_t cfg_dst_desc_trace; /* offset: 0xaa4 ; repeat: [1] */
+ volatile uint32_t cfg_dst_desc_trace_base_addr; /* offset: 0xaa8 ; repeat: [1] */
+ volatile uint32_t cfg_debug_timestamp; /* offset: 0xaac ; repeat: [1] */
+ volatile uint32_t debug_timestamp; /* offset: 0xab0 ; repeat: [1] */
+ volatile uint32_t auto_address_err_cb_indication; /* offset: 0xab4 ; repeat: [1] */
+} DRAM_DMA_ENGINE_CONFIG_t;
+
+#endif /* DRAM_DMA_ENGINE_CONFIG_REGS_H */
--- /dev/null
+/*-------------------------------------------------------------------------------------
+// Copyright (c) 2022 by Hailotech. This model is the confidential and
+// proprietary property of Hailotech, and the possession or use of this
+// file requires a written license from Hailotech.
+-------------------------------------------------------------------------------------*/
+
+#ifndef DRAM_DMA_PACKAGE_MACROS_H
+#define DRAM_DMA_PACKAGE_MACROS_H
+
+/* NOTE(review): the include now lives inside the guard so repeated inclusion of
+ * this header is a no-op after the first pass. */
+#include <stdint.h>
+
+/* HW constants and parameters for package "dram_dma".
+ * Naming convention: N_* = element/channel counts, W_* = field widths in bits,
+ * *_OFFSET = bit positions, PAGE_SIZE_MAX* = log2 of the maximum page size. */
+
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_AXI_QOS_BITS (3)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH (32)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH_RX_CREDIT (4096)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH_TX_CREDIT (2048)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DD_DESC (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DESC (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DM_DESC (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_PATTERNS_MAX_LINES (262144)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_PATTERNS_MAX_PAGES (262144)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH (12)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_RX_SHMIFO (24)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SD_DESC (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN (12)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SM_DESC (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SW_CH (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SW_INT (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_TX_SHMIFO (20)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__PAGE_SIZE_MAX (13)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__PAGE_SIZE_MAX_8B (10)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_BURST_SIZE (29)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_BURST_SIZE_8B (26)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_C2C_SEL (6)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CCB_DESC_INDEX (18)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CCB_DESC_INDEX_LOG (5)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CFG_DATA (32)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH (5)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_CREDIT_SIZE (10)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_RX_CREDIT (13)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_TX_CREDIT (12)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CORE_ADDR (35)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CORE_BASE_ADDR (29)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CSR_CFG_ADDR (13)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DDR_ADDR (35)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DDR_BASE_ADDR (26)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DD_DESC (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DESC (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DESC_DEMUX_ADDR (43)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DIR_CH (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DM_DESC (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_ENG_CFG_ADDR (14)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_MAX_PATTERNS (2)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_PATTERNS_MAX_LINES (18)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_PATTERNS_MAX_PAGES (18)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SD_DESC (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SHMIFO (5)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SM_DESC (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SW_CH (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ADDR (64)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_DATA_DATA (64)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_DATA_DESC (128)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DATA0 (2)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DATA1 (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DESC (3)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_CFG_ADDR (10)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_MEM_ADDR (5)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_MEM_DATA (256)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__ADDR_ALLSTRB_OFFSET (56)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__ADDR_APCMD_OFFSET (55)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__FPGA_N_HW_DMA_ENG (0)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_DESC_AXI (1)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_DMA_ENG (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_HMASTER (4)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_HW_DMA_ENG (3)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_SW_DMA_ENG (1)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_TOT_DMA_DIR_CH (48)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_VISION_CH (10)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CFG_ADDR (16)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CFG_DATA (32)
+#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CSR_CFG_ADDR (12)
+
+
+#endif /* DRAM_DMA_PACKAGE_MACROS_H */
+++ /dev/null
-/*-------------------------------------------------------------------------------------
-// Copyright (c) 2022 by Hailotech This model is the confidential and
-// proprietary property of Hailotech and the possession or use of this
-// file requires a written license from Hailotech.
--------------------------------------------------------------------------------------*/
-
-
-
-#include <stdint.h>
-
-#ifndef DRAM_DMA_ENGINE_CONFIG_MACRO_H
-#define DRAM_DMA_ENGINE_CONFIG_MACRO_H
-
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCENABLE : val */
-/* Description: Enable per channel,when disabled do not give credits to vDMA */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCENABLE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCRESET : val */
-/* Description: Soft reset per channel,when write 1'b1 should clear all internal credits/counter/status. Should be set when channel is disabled,usually with vDMA channel reset (abort). Write 1'b0 should do nothing. Read always return 1'b0. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCRESET__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCMODE : val */
-/* Description: 0 - CONT_MODE. 1 - BURST_MODE */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCADDBURSTVAL : val */
-/* Description: Writing to this register increment the remain burst counter in QDDC by QddcAddBurstVal x 8 Bytes: RemainBurstCount += QddcAddBurstVal. Reading this register should return the current available credit counter (RemainBurstCount) in 2s complement format - can be negative. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__WIDTH (27)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__MASK (0x07FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x07FFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x07FFFFFFL) | (((uint32_t)(value) << 0) & 0x07FFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x07FFFFFFL) | 0x07FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCADDBURSTVAL__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x07FFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCMAXDESC : val */
-/* Description: Maximum in flight descriptors,this is a TH for number of descriptors the QM might give the vDMA. 3'd0 - 1 descriptor (debug mode). 3'd1 - N_QM_DESC*1/8 (2). 3'd2 - N_QM_DESC*2/8 (4). 3'd3 - N_QM_DESC*3/8 (6). 3'd4 - N_QM_DESC*2/4 (8). 3'd5 - N_QM_DESC*5/8 (10). 3'd6 - N_QM_DESC*6/8 (12). 3'd7 - N_QM_DESC-1 (15-maximum),default. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__RESET (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCMAXDESC__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCSHMIFOID : val */
-/* Description: The RX-SHMIFO ID. Used to know the SHMIFO base address (from a global parameter/define) and used to select the correct SHMIFO credit signal (nn_core_inbound_buffer_ready_pulse). 0-19: for DSM-RX 0-19. 20-23: for CSM 0-3. 24-30: reserved. 31: NULL ignore any credit from NN Core. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__WIDTH (5)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__MASK (0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__RESET (0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOID__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCSHMIFOCREDITSIZE : val */
-/* Description: The credit size in 8B granularity minus 1. 0 - indicates 8B 1 - indicates 16B ... 10'd1023 - indicates 8kB */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__WIDTH (10)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__MASK (0x000003FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000003FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL) | (((uint32_t)(value) << 0) & 0x000003FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL) | 0x000003FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOCREDITSIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDCSHMIFOINITCREDIT : val */
-/* Description: Writing to this register set the amount of credit from SHMIFO RX (AvailableCredits),used to configure the initial amount of credits,reading this register should return the value of AvailableCredits. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__WIDTH (13)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__MASK (0x00001FFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00001FFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00001FFFL) | (((uint32_t)(value) << 0) & 0x00001FFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00001FFFL) | 0x00001FFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDCSHMIFOINITCREDIT__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00001FFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCENABLE : val */
-/* Description: Enable per channel,when disabled do not give credits to vDMA */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCENABLE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCRESET : val */
-/* Description: Soft reset per channel,when write 1'b1 should clear all internal credits/counter/status. Should be set when channel is disabled,usually with vDMA channel reset (abort). Write 1'b0 should do nothing. Read always return 1'b0. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCRESET__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCMAXDESC : val */
-/* Description: Maximum in flight descriptors,this is a TH for number of descriptors the QM might give the vDMA. 3'd0 - 1 descriptor (debug mode). 3'd1 - N_QM_DESC*1/8 (2). 3'd2 - N_QM_DESC*2/8 (4). 3'd3 - N_QM_DESC*3/8 (6). 3'd4 - N_QM_DESC*4/8 (8). 3'd5 - N_QM_DESC*5/8 (10). 3'd6 - N_QM_DESC*6/8 (12). 3'd7 - N_QM_DESC-1 (15-maximum),default. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__RESET (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCMAXDESC__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCSHMIFOID : val */
-/* Description: The TX-SHMIFO ID. Used to know the SHMIFO base address (from a global parameter/define) and used to select the correct SHMIFO credit signal (nn_core_outbound_buffer_valid_pulse). 0-19: for DSM-TX 0-19. 20-30: reserved. 31: NULL ignore any credit from NN Core. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__WIDTH (5)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__MASK (0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__RESET (0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOID__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCSHMIFOCREDITSIZE : val */
-/* Description: The credit size in 8B granularity minus 1. 0 - indicates 8B 1 - indicates 16B ... 10'd1023 - indicates 8kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__WIDTH (10)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__MASK (0x000003FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000003FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL) | (((uint32_t)(value) << 0) & 0x000003FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL) | 0x000003FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSHMIFOCREDITSIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000003FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCFULLNUMPATTERNS : val */
-/* Description: Number of patterns per pattern ID minus one. 0 - one pattern,1 - two patterns,...,3 - four patterns. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__WIDTH (2)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__MASK (0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLNUMPATTERNS__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCFULLPATTERNNUMLINES : val */
-/* Description: Number of lines per pattern. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMLINES__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCFULLPATTERNNUMPAGES : val */
-/* Description: Number of pages per line. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNNUMPAGES__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCFULLPATTERNPAGESIZE : val */
-/* Description: page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__WIDTH (9)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__MASK (0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCFULLPATTERNRESIDUEPAGESIZE : val */
-/* Description: Residue page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCFULLPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCSIMPPATTERNNUMPAGES : val */
-/* Description: Number of pages per line (simplified pattern has single line/pattern). */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNNUMPAGES__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCSIMPPATTERNPAGESIZE : val */
-/* Description: Log2(Page size/512B),valid values are 0 to PAGE_SIZE_MAX-10. 0 - 512B,1 - 1kB,2 - 2kB,3 - 4kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__WIDTH (2)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__MASK (0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDCSIMPPATTERNRESIDUEPAGESIZE : val */
-/* Description: Residue page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDCSIMPPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCENABLE : val */
-/* Description: Enable per channel,when disabled do not give credits to vDMA */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCENABLE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCRESET : val */
-/* Description: Soft reset per channel,when write 1'b1 should clear all internal credits/counter/status. Should be set when channel is disabled,usually with vDMA channel reset (abort). Write 1'b0 should do nothing. Read always return 1'b0. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCRESET__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCMEMBASEADDR : val */
-/* Description: Base address to the CCB in the DDR memory space. aligned to minimum page size of 512B. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__WIDTH (26)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__MASK (0x03FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x03FFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | (((uint32_t)(value) << 0) & 0x03FFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | 0x03FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMBASEADDR__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCMEMCCBSIZELOG2 : val */
-/* Description: The CCB size Log2(memory size/512B): 1 - 1kB (2 pages). 2 - 2kB. valid values are 1 to W_CCB_DESC_INDEX */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__WIDTH (5)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__MASK (0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000001FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | (((uint32_t)(value) << 0) & 0x0000001FL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL) | 0x0000001FL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZELOG2__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001FL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCDESCCSINTERRUPT : val */
-/* Description: When > 0 the QDMC will interrupt the CS manager every written QdmcDescCsInterrupt descriptors. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCCSINTERRUPT__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCBANKINTERLEAVEMODE : val */
-/* Description: Select the bank interleave mode: 2'd0 - interleave 8 banks (default),2'd1 - Interleave 4 banks,2'd2 - Interleave 2 banks,2'd3 - no interleave. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__WIDTH (2)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__MASK (0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCBANKINTERLEAVEMODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCMODE : val */
-/* Description: 0 - CONT_MODE. 1 - BURST_MODE */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCADDBURSTVAL : val */
-/* Description: Writing to this register increment the available descriptor counter in QDMC by QdmcAddBurstVal descriptors: AvailableDescsCounter += QdmcAddBurstVal. Reading this register should return the current available descriptors counter (AvailableDescsCounter). Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCADDBURSTVAL__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCMEMCCBSIZE : val */
-/* Description: The CCB size Log2(memory size/512B): 1 - 1kB (2 pages). 2 - 2kB. valid values are 1 to W_CCB_DESC_INDEX */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCMEMCCBSIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCDESCPERIPHINTERRUPT : val */
-/* Description: When > 0 the QDMC will interrupt the peripheral every written QdmcDescPeriphInterrupt descriptors. */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCDESCPERIPHINTERRUPT__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMCCCBPROCESSEDINDEX : val */
-/* Description: Used by the peripheral to indicates how many data is ready in the CCB (process). This is the CcbIndex (free pointer in CCB). */
-#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMCCCBPROCESSEDINDEX__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCENABLE : val */
-/* Description: Enable per channel,when disabled do not give credits to vDMA */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCENABLE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCRESET : val */
-/* Description: Soft reset per channel,when write 1'b1 should clear all internal credits/counter/status. Should be set when channel is disabled,usually with vDMA channel reset (abort). Write 1'b0 should do nothing. Read always return 1'b0. Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCRESET__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCMODE : val */
-/* Description: QSMC mode of operation: 2'd0 - CONT_MODE 2'd1 - reserved. 2'd2 - BURST_MODE 2'd3 - C2C_MODE */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__WIDTH (2)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__MASK (0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCC2CSEL : val */
-/* Description: Selector for Channel-to-Channel credit input,selects QDMC channel as source for HW available descriptors */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__WIDTH (6)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__MASK (0x0000003FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__RESET (0x0000003FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000003FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000003FL) | (((uint32_t)(value) << 0) & 0x0000003FL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000003FL) | 0x0000003FL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCC2CSEL__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000003FL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCADDBURSTVAL : val */
-/* Description: Writing to this register increment the available descriptor counter in QSMC by QsmcAddBurstVal descriptors: AvailableDescsCounter += QsmcAddBurstVal. Reading this register should return the current available descriptors counter (AvailableDescsCounter). Implemented as external register type. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCADDBURSTVAL__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCMEMBASEADDR : val */
-/* Description: Base address to the CCB in the DDR memory space. aligned to minimum page size of 512B. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__WIDTH (26)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__MASK (0x03FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x03FFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | (((uint32_t)(value) << 0) & 0x03FFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL) | 0x03FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMBASEADDR__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x03FFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCMEMCCBSIZE : val */
-/* Description: The CCB size minus one in page size granularity. 0 - 1 desc 1 - 2 desc ... N_CCB_MAX_DESC-1 - N_CCB_MAX_DESC desc. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCMEMCCBSIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCPAGESIZE : val */
-/* Description: M2D Memory page size. Valid values are: 0 - 512B,1 - 1KB,2 - 2KB,3 - 4KB,4 - 1536B. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCSIMPPATTERNNUMPAGES : val */
-/* Description: Number of pages per line (simplified pattern has single line/pattern). */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNNUMPAGES__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCSIMPPATTERNRESIDUEPAGESIZE : val */
-/* Description: Residue page size in 8B granularity,minus one,per pattern. 0-8B,1-16B,...,511-4kB */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__WIDTH (9)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__MASK (0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | (((uint32_t)(value) << 0) & 0x000001FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL) | 0x000001FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCSIMPPATTERNRESIDUEPAGESIZE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCBANKINTERLEAVEMODE : val */
-/* Description: Select the bank interleave mode: 2'd0 - interleave 8 banks (default),2'd1 - Interleave 4 banks,2'd2 - Interleave 2 banks,2'd3 - no interleave. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__WIDTH (2)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__MASK (0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000003L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | (((uint32_t)(value) << 0) & 0x00000003L))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L) | 0x00000003L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCBANKINTERLEAVEMODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000003L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCDESCPERIPHINTERRUPT : val */
-/* Description: When > 0 the QSMC will interrupt the peripheral every read QsmcDescPeriphInterrupt descriptors. */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | (((uint32_t)(value) << 0) & 0x0003FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL) | 0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCDESCPERIPHINTERRUPT__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0003FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMCCCBFREEINDEX : val */
-/* Description: Used by the peripheral to indicates how many data is ready in the CCB for write (process). This is the CcbIndex (free pointer in CCB). */
-#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__WIDTH (18)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__MASK (0x0003FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMCCCBFREEINDEX__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0003FFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_CS_INTR_MASK : val */
-/* Description: INT register bits[15:0] per M2D channel,indicating one of the following events: a. Internal desc - QSMC processed last CCB descriptor. Implemented by set the interrupt when CCB-free-index is wrapped (become zero),might be used for CONF channel - to indicates conf is done. bits[31:16] per D2M channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_CS_INTR_STATUS : val */
-/* Description: INT register bits[15:0] per M2D channel,indicating one of the following events: a. Internal desc - QSMC processed last CCB descriptor. Implemented by set the interrupt when CCB-free-index is wrapped (become zero),might be used for CONF channel - to indicates conf is done. bits[31:16] per D2M channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_CS_INTR_W1C : val */
-/* Description: INT register bits[15:0] per M2D channel,indicating one of the following events: a. Internal desc - QSMC processed last CCB descriptor. Implemented by set the interrupt when CCB-free-index is wrapped (become zero),might be used for CONF channel - to indicates conf is done. bits[31:16] per D2M channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_CS_INTR_W1S : val */
-/* Description: INT register bits[15:0] per M2D channel,indicating one of the following events: a. Internal desc - QSMC processed last CCB descriptor. Implemented by set the interrupt when CCB-free-index is wrapped (become zero),might be used for CONF channel - to indicates conf is done. bits[31:16] per D2M channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescCsInterrupt (OR) External desc - domain#0 (local) source/destination event. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_CS_INTR_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_AP_INTR_MASK : val */
-/* Description: INT register bit per direction/channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_AP_INTR_STATUS : val */
-/* Description: INT register bit per direction/channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_AP_INTR_W1C : val */
-/* Description: INT register bit per direction/channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_AP_INTR_W1S : val */
-/* Description: INT register bit per direction/channel indicating one of the following events: Internal desc - QDMC processed descriptors per QdmcDescPeriphInterrupt (D2M enhanced channels only) (OR) Internal desc - QSMC processed descriptors per QsmcDescPeriphInterrupt (M2D enhanced channels only) (OR) External desc - domain#1 (host) source/destination event */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_AP_INTR_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_DSP_INTR_MASK : val */
-/* Description: INT register */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_DSP_INTR_STATUS : val */
-/* Description: INT register */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_DSP_INTR_W1C : val */
-/* Description: INT register */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_DSP_INTR_W1S : val */
-/* Description: INT register */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_DSP_INTR_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ERR_INTR_MASK : desc_err */
-/* Description: Summary of desc_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__DESC_ERR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* ENGINE_ERR_INTR_MASK : qddc_crd_ovf_err */
-/* Description: Summary of qddc_crd_ovf_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QDDC_CRD_OVF_ERR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/* ENGINE_ERR_INTR_MASK : qsdc_crd_ovf_err */
-/* Description: Summary of qsdc_crd_ovf_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__SHIFT (2)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__MASK (0x00000004L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000004L) >> 2)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000004L) | (((uint32_t)(value) << 2) & 0x00000004L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000004L) | ((uint32_t)(1) << 2))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_MASK__QSDC_CRD_OVF_ERR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000004L) | ((uint32_t)(0) << 2))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ERR_INTR_STATUS : desc_err */
-/* Description: Summary of desc_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__DESC_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-
-/* ENGINE_ERR_INTR_STATUS : qddc_crd_ovf_err */
-/* Description: Summary of qddc_crd_ovf_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QDDC_CRD_OVF_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-
-/* ENGINE_ERR_INTR_STATUS : qsdc_crd_ovf_err */
-/* Description: Summary of qsdc_crd_ovf_err_intr register. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__SHIFT (2)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__MASK (0x00000004L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ERR_INTR_STATUS__QSDC_CRD_OVF_ERR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000004L) >> 2)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESC_ERR_INTR_MASK : DescStatus */
-/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DESCSTATUS__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/* DESC_ERR_INTR_MASK : RemainPageSize */
-/* Description: non-zero REMAINING_PAGE_SIZE. Refer to EngErrInterruptSource register for the error origin. Refer to EngErrRemainPageSize register for the returned value. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__SHIFT (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__MASK (0x00000100L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | (((uint32_t)(value) << 8) & 0x00000100L))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(1) << 8))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__REMAINPAGESIZE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(0) << 8))
-
-/* DESC_ERR_INTR_MASK : SrcDescWdataPar */
-/* Description: Source descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__SHIFT (9)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__MASK (0x00000200L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000200L) >> 9)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000200L) | (((uint32_t)(value) << 9) & 0x00000200L))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000200L) | ((uint32_t)(1) << 9))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__SRCDESCWDATAPAR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000200L) | ((uint32_t)(0) << 9))
-
-/* DESC_ERR_INTR_MASK : DstDescWdataPar */
-/* Description: Destination descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__SHIFT (10)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__MASK (0x00000400L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000400L) >> 10)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000400L) | (((uint32_t)(value) << 10) & 0x00000400L))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000400L) | ((uint32_t)(1) << 10))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_MASK__DSTDESCWDATAPAR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000400L) | ((uint32_t)(0) << 10))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESC_ERR_INTR_STATUS : DescStatus */
-/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DESCSTATUS__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/* DESC_ERR_INTR_STATUS : RemainPageSize */
-/* Description: non-zero REMAINING_PAGE_SIZE. Refer to EngErrInterruptSource register for the error origin. Refer to EngErrRemainPageSize register for the returned value. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__SHIFT (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__MASK (0x00000100L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__REMAINPAGESIZE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
-
-/* DESC_ERR_INTR_STATUS : SrcDescWdataPar */
-/* Description: Source descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__SHIFT (9)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__MASK (0x00000200L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__SRCDESCWDATAPAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000200L) >> 9)
-
-/* DESC_ERR_INTR_STATUS : DstDescWdataPar */
-/* Description: Destination descriptor complete with error status. Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__SHIFT (10)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__MASK (0x00000400L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_STATUS__DSTDESCWDATAPAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000400L) >> 10)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESC_ERR_INTR_W1C : DescStatus */
-/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1C__DESCSTATUS__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESC_ERR_INTR_W1S : DescStatus */
-/* Description: Interrupt bit per DESC_STATUS fields of vDMA descriptor which returned unexpected value (Note that successful descriptor returns status of 8'h1). Refer to EngErrInterruptSource register for the error origin. */
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__DESC_ERR_INTR_W1S__DESCSTATUS__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_CRD_OVF_ERR_INTR_MASK : ch */
-/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_MASK__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_CRD_OVF_ERR_INTR_STATUS : ch */
-/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_STATUS__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_CRD_OVF_ERR_INTR_W1C : ch */
-/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1C__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_CRD_OVF_ERR_INTR_W1S : ch */
-/* Description: Interrupt bit per QDDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CRD_OVF_ERR_INTR_W1S__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_CRD_OVF_ERR_INTR_MASK : ch */
-/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_MASK__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_CRD_OVF_ERR_INTR_STATUS : ch */
-/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_STATUS__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_CRD_OVF_ERR_INTR_W1C : ch */
-/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1C__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_CRD_OVF_ERR_INTR_W1S : ch */
-/* Description: Interrupt bit per QSDC channel indicating overflow or underflow in Core credit counter. */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CRD_OVF_ERR_INTR_W1S__CH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGERRINTERRUPTSOURCE : ChannelID */
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__MASK (0x0000000FL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__CHANNELID__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
-
-/* ENGERRINTERRUPTSOURCE : Direction */
-/* Description: 0 - Destination. 1 - Source. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__SHIFT (4)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__MASK (0x00000010L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DIRECTION__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000010L) >> 4)
-
-/* ENGERRINTERRUPTSOURCE : Domain */
-/* Description: 0 - Device. 1 - Memory. */
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__SHIFT (5)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__MASK (0x00000020L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRINTERRUPTSOURCE__DOMAIN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000020L) >> 5)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGERRREMAINPAGESIZE : val */
-/* Description: In case of non-zero REMAINING_PAGE_SIZE this register holds the latched value until cleared by writing to this register */
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__WIDTH (24)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__MASK (0x00FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGERRREMAINPAGESIZE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00FFFFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGTRANSFERPAGESIZE : size */
-/* Description: TRANSFERRED_PAGE_SIZE value of last descriptor write to QDMC */
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__WIDTH (24)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__MASK (0x00FFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__SIZE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00FFFFFFL) >> 0)
-
-/* ENGTRANSFERPAGESIZE : ch_id */
-/* Description: QDMC Channel ID */
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__SHIFT (24)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__MASK (0x0F000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGTRANSFERPAGESIZE__CH_ID__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0F000000L) >> 24)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* VDMASOFTRESET : val */
-/* Description: Apply soft reset to vDMA. Must be cleared in order to release vDMA from soft reset. */
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* VDMASOFTRESET : par */
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__SHIFT (31)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__MASK (0x80000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x80000000L) >> 31)
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | (((uint32_t)(value) << 31) & 0x80000000L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(1) << 31))
-#define DRAM_DMA_ENGINE_CONFIG__VDMASOFTRESET__PAR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(0) << 31))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* VDMA_SHAREDBUS : cs_mask */
-/* Description: Bit mask on vDMA Sharedbus interrupt source for CS */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__MASK (0x0000000FL)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__RESET (0x0000000AL)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL) | (((uint32_t)(value) << 0) & 0x0000000FL))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL) | 0x0000000FL)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__CS_MASK__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL))
-
-/* VDMA_SHAREDBUS : ap_mask */
-/* Description: Bit mask on vDMA Sharedbus interrupt source for AP */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__SHIFT (4)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__MASK (0x000000F0L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__RESET (0x00000050L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000F0L) >> 4)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000F0L) | (((uint32_t)(value) << 4) & 0x000000F0L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000F0L) | 0x000000F0L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SHAREDBUS__AP_MASK__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000F0L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_QDDC_REDUNDANT_EN : val */
-/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDDC_REDUNDANT_EN__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_QSDC_REDUNDANT_EN : val */
-/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSDC_REDUNDANT_EN__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_QDMC_REDUNDANT_EN : val */
-/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QDMC_REDUNDANT_EN__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_QSMC_REDUNDANT_EN : val */
-/* Description: Redundancy mode enable bit per QM pair. bit i makes QM[i*2+1] a redundancy for QM[i*2] */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_QSMC_REDUNDANT_EN__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_REDUNDANT_ASF_INT_MASK : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_REDUNDANT_ASF_INT_STATUS : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_REDUNDANT_ASF_INT_W1C : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_REDUNDANT_ASF_INT_W1S : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_REDUNDANT_ASF_INT_MASK : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_REDUNDANT_ASF_INT_STATUS : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_REDUNDANT_ASF_INT_W1C : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_REDUNDANT_ASF_INT_W1S : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMC_REDUNDANT_ASF_INT_MASK : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMC_REDUNDANT_ASF_INT_STATUS : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMC_REDUNDANT_ASF_INT_W1C : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMC_REDUNDANT_ASF_INT_W1S : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMC_REDUNDANT_ASF_INT_MASK : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_MASK__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMC_REDUNDANT_ASF_INT_STATUS : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_STATUS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMC_REDUNDANT_ASF_INT_W1C : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1C__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMC_REDUNDANT_ASF_INT_W1S : val */
-/* Description: Redundancy mode compare mismatch for QM pair i */
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__WIDTH (8)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__MASK (0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000000FFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | (((uint32_t)(value) << 0) & 0x000000FFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL) | 0x000000FFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_REDUNDANT_ASF_INT_W1S__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000000FFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* PRIOISLP : val */
-/* Description: Indicates channel priority is low priority. */
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | (((uint32_t)(value) << 0) & 0xFFFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL) | 0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__PRIOISLP__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* READLPTOQOSVALUE : val */
-/* Description: The QOS toward DDR-AXI master for low priority read. */
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__READLPTOQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* READHPTOQOSVALUE : val */
-/* Description: The QOS toward DDR-AXI master for high priority read. */
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__RESET (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__READHPTOQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* WRITELPTOQOSVALUE : val */
-/* Description: The QOS toward DDR-AXI master for low priority write. */
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITELPTOQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* WRITEHPTOQOSVALUE : val */
-/* Description: The QOS toward DDR-AXI master for high priority write. */
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__RESET (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__WRITEHPTOQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESCREADQOSVALUE : val */
-/* Description: The QOS toward DDR-desc-AXI master for read. */
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__RESET (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCREADQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DESCWRITEQOSVALUE : val */
-/* Description: The QOS toward DDR-desc-AXI master for write. */
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__MASK (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__RESET (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000007L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | (((uint32_t)(value) << 0) & 0x00000007L))
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L) | 0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__DESCWRITEQOSVALUE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000007L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* VDMA_ARB : prio_en */
-/* Description: Enable 2 level priority based channel arbitration in vDMA */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__RESET (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PRIO_EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* VDMA_ARB : interleave_en */
-/* Description: Enable arbitration order to interleave between M2D and D2M channels */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__RESET (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__INTERLEAVE_EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/* VDMA_ARB : par */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__SHIFT (31)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__MASK (0x80000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x80000000L) >> 31)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | (((uint32_t)(value) << 31) & 0x80000000L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(1) << 31))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_ARB__PAR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x80000000L) | ((uint32_t)(0) << 31))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QM_CFG_CG_DELAY : val */
-/* Description: Clock cycles to keep clock running after enable condition is met */
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__MASK (0x0000000FL)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__RESET (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000000FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL) | (((uint32_t)(value) << 0) & 0x0000000FL))
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL) | 0x0000000FL)
-#define DRAM_DMA_ENGINE_CONFIG__QM_CFG_CG_DELAY__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000000FL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDDC_CFG_CG_BYPASS : val */
-/* Description: Bypass QDDC CG */
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDDC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSDC_CFG_CG_BYPASS : val */
-/* Description: Bypass QSDC CG */
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSDC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QDMC_CFG_CG_BYPASS : val */
-/* Description: Bypass QDMC CG */
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QDMC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* QSMC_CFG_CG_BYPASS : val */
-/* Description: Bypass QSMC CG */
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__MASK (0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000FFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | (((uint32_t)(value) << 0) & 0x0000FFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL) | 0x0000FFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__QSMC_CFG_CG_BYPASS__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000FFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ASF_INT_MASK : parity_error_in_regfile */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_MASK__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ASF_INT_STATUS : parity_error_in_regfile */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_STATUS__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ASF_INT_W1C : parity_error_in_regfile */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1C__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_ASF_INT_W1S : parity_error_in_regfile */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_ASF_INT_W1S__PARITY_ERROR_IN_REGFILE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* ENGINE_RW_PARITY_BIST_MODE : val */
-/* Description: write 1 if want to work in rw_parity bist mode in which the parity bit is written by APB wdata and not from HW calculation */
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__ENGINE_RW_PARITY_BIST_MODE__VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* VDMA_STOP_LP : dis */
-/* Description: Write 1 if want to disable LP Stop feature */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__DIS__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* VDMA_STOP_LP : force_val */
-/* Description: Force Stop LP state when feature is enabled */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_STOP_LP__FORCE_VAL__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* VDMA_SCH : stop_th */
-/* Description: Stop scheduling for this many cycles after each successful allocation */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__WIDTH (7)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__MASK (0x0000007FL)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__RESET (0x00000007L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000007FL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000007FL) | (((uint32_t)(value) << 0) & 0x0000007FL))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000007FL) | 0x0000007FL)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_TH__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000007FL))
-
-/* VDMA_SCH : stop_en */
-/* Description: Enable periodic scheduling stopping mechanism */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__SHIFT (7)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__MASK (0x00000080L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__RESET (0x00000080L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000080L) >> 7)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000080L) | (((uint32_t)(value) << 7) & 0x00000080L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000080L) | ((uint32_t)(1) << 7))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__STOP_EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000080L) | ((uint32_t)(0) << 7))
-
-/* VDMA_SCH : tsf24_mode */
-/* Description: Apply fix to increase maximum transfers to 24 */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__SHIFT (8)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__MASK (0x00000100L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000100L) >> 8)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | (((uint32_t)(value) << 8) & 0x00000100L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(1) << 8))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF24_MODE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000100L) | ((uint32_t)(0) << 8))
-
-/* VDMA_SCH : tsf_af_threshold */
-/* Description: Almost Full at 13 allocated TSF (12+8=20). In tsf24_mode should be set to 12. */
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__SHIFT (9)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__WIDTH (5)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__MASK (0x00003E00L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__RESET (0x00002800L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00003E00L) >> 9)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00003E00L) | (((uint32_t)(value) << 9) & 0x00003E00L))
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00003E00L) | 0x00003E00L)
-#define DRAM_DMA_ENGINE_CONFIG__VDMA_SCH__TSF_AF_THRESHOLD__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00003E00L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_SRC_DESC_TRACE : en */
-/* Description: Enable tracing of descriptors read from Source QMs */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* CFG_SRC_DESC_TRACE : stop_on_wrap */
-/* Description: Stop when reaching end of tracing buffer */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__STOP_ON_WRAP__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/* CFG_SRC_DESC_TRACE : mprot */
-/* Description: AWPROT value */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__SHIFT (2)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__MASK (0x0000001CL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000001CL) >> 2)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL) | (((uint32_t)(value) << 2) & 0x0000001CL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL) | 0x0000001CL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MPROT__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL))
-
-/* CFG_SRC_DESC_TRACE : mcache */
-/* Description: AWCACHE value */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__SHIFT (5)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__MASK (0x000001E0L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__RESET (0x00000020L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001E0L) >> 5)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L) | (((uint32_t)(value) << 5) & 0x000001E0L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L) | 0x000001E0L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__MCACHE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L))
-
-/* CFG_SRC_DESC_TRACE : buff_size_m1 */
-/* Description: Buffer size minus 1 in 16B descriptors */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__SHIFT (16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__MASK (0xFFFF0000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFF0000L) >> 16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | (((uint32_t)(value) << 16) & 0xFFFF0000L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | 0xFFFF0000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE__BUFF_SIZE_M1__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_SRC_DESC_TRACE_BASE_ADDR : base_addr */
-/* Description: Buffer base address bits 34:4 aligned to 16B */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__WIDTH (31)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__MASK (0x7FFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x7FFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | (((uint32_t)(value) << 0) & 0x7FFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | 0x7FFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_SRC_DESC_TRACE_BASE_ADDR__BASE_ADDR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_DST_DESC_TRACE : en */
-/* Description: Enable tracing of descriptors read from Source QMs */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* CFG_DST_DESC_TRACE : stop_on_wrap */
-/* Description: Stop when reaching end of tracing buffer */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__STOP_ON_WRAP__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/* CFG_DST_DESC_TRACE : mprot */
-/* Description: AWPROT value */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__SHIFT (2)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__WIDTH (3)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__MASK (0x0000001CL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x0000001CL) >> 2)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL) | (((uint32_t)(value) << 2) & 0x0000001CL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL) | 0x0000001CL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MPROT__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x0000001CL))
-
-/* CFG_DST_DESC_TRACE : mcache */
-/* Description: AWCACHE value. MER-3804 ECO: Note that bit 3 is double booked for timeout ExtRef default value which needs to be 1. In case debug tracing is enabled */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__SHIFT (5)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__WIDTH (4)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__MASK (0x000001E0L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__RESET (0x00000120L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x000001E0L) >> 5)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L) | (((uint32_t)(value) << 5) & 0x000001E0L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L) | 0x000001E0L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__MCACHE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x000001E0L))
-
-/* CFG_DST_DESC_TRACE : buff_size_m1 */
-/* Description: Buffer size minus 1 in 16B descriptors */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__SHIFT (16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__WIDTH (16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__MASK (0xFFFF0000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFF0000L) >> 16)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | (((uint32_t)(value) << 16) & 0xFFFF0000L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L) | 0xFFFF0000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE__BUFF_SIZE_M1__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0xFFFF0000L))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_DST_DESC_TRACE_BASE_ADDR : base_addr */
-/* Description: Buffer base address bits 34:4 aligned to 16B. MER-3804 ECO: Note that bits 17:16 are double booked for timeout ExtRef mux. In case debug tracing and ExtRef are required to be turned on this constrain the base address bits 17:16 to be the same as the timestamp mux */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__WIDTH (31)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__MASK (0x7FFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x7FFFFFFFL) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | (((uint32_t)(value) << 0) & 0x7FFFFFFFL))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL) | 0x7FFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DST_DESC_TRACE_BASE_ADDR__BASE_ADDR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x7FFFFFFFL))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* CFG_DEBUG_TIMESTAMP : en */
-/* Description: Write 1 to enable timestamp counter for debug logic */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__EN__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-/* CFG_DEBUG_TIMESTAMP : clr */
-/* Description: Write 1 to clear timestamp counter. After writing 1 to this field need to write 0 immediately */
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__SHIFT (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__MASK (0x00000002L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000002L) >> 1)
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | (((uint32_t)(value) << 1) & 0x00000002L))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(1) << 1))
-#define DRAM_DMA_ENGINE_CONFIG__CFG_DEBUG_TIMESTAMP__CLR__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000002L) | ((uint32_t)(0) << 1))
-
-/*----------------------------------------------------------------------------------------------------*/
-/* DEBUG_TIMESTAMP : val */
-#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__WIDTH (32)
-#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__MASK (0xFFFFFFFFL)
-#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__RESET (0x00000000L)
-#define DRAM_DMA_ENGINE_CONFIG__DEBUG_TIMESTAMP__VAL__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0xFFFFFFFFL) >> 0)
-
-/*----------------------------------------------------------------------------------------------------*/
-/* AUTO_ADDRESS_ERR_CB_INDICATION : enable */
-/* Description: default is 1, meaning the address error is enabled, to hide the address error indication, set to 0 */
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__SHIFT (0)
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__WIDTH (1)
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__MASK (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__RESET (0x00000001L)
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__READ(reg_offset) \
- (((uint32_t)(reg_offset) & 0x00000001L) >> 0)
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__MODIFY(reg_offset, value) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | (((uint32_t)(value) << 0) & 0x00000001L))
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__SET(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(1) << 0))
-#define DRAM_DMA_ENGINE_CONFIG__AUTO_ADDRESS_ERR_CB_INDICATION__ENABLE__CLR(reg_offset) \
- (reg_offset) = (((reg_offset) & ~0x00000001L) | ((uint32_t)(0) << 0))
-
-
-#endif /* DRAM_DMA_ENGINE_CONFIG_MACRO_H */
+++ /dev/null
-/*-------------------------------------------------------------------------------------
-// Copyright (c) 2022 by Hailotech This model is the confidential and
-// proprietary property of Hailotech and the possession or use of this
-// file requires a written license from Hailotech.
--------------------------------------------------------------------------------------*/
-
-
-
-#include <stdint.h>
-
-#ifndef DRAM_DMA_ENGINE_CONFIG_REGS_H
-#define DRAM_DMA_ENGINE_CONFIG_REGS_H
-
-#include "dram_dma_package_macros.h"
-#include "dram_dma_engine_config_macros.h"
-
-typedef struct DRAM_DMA_ENGINE_CONFIG_regs_s {
- volatile uint32_t QddcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x0 ; repeat: [16] */
- volatile uint32_t QddcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x40 ; repeat: [16] */
- volatile uint32_t QddcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x80 ; repeat: [16] */
- volatile uint32_t QddcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0xc0 ; repeat: [16] */
- volatile uint32_t QddcMaxDesc[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x100 ; repeat: [16] */
- volatile uint32_t QddcShmifoId[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x140 ; repeat: [16] */
- volatile uint32_t QddcShmifoCreditSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x180 ; repeat: [16] */
- volatile uint32_t QddcShmifoInitCredit[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x1c0 ; repeat: [16] */
- volatile uint32_t QsdcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x200 ; repeat: [16] */
- volatile uint32_t QsdcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x240 ; repeat: [16] */
- volatile uint32_t QsdcMaxDesc[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x280 ; repeat: [16] */
- volatile uint32_t QsdcShmifoId[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x2c0 ; repeat: [16] */
- volatile uint32_t QsdcShmifoCreditSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x300 ; repeat: [16] */
- volatile uint32_t QsdcFullNumPatterns[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN]; /* offset: 0x340 ; repeat: [4] */
- volatile uint32_t QsdcFullPatternNumLines[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x350 ; repeat: [4, 4] */
- volatile uint32_t QsdcFullPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x390 ; repeat: [4, 4] */
- volatile uint32_t QsdcFullPatternPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x3d0 ; repeat: [4, 4] */
- volatile uint32_t QsdcFullPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN][DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS];/* offset: 0x410 ; repeat: [4, 4] */
- volatile uint32_t QsdcSimpPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x450 ; repeat: [12] */
- volatile uint32_t QsdcSimpPatternPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x480 ; repeat: [12] */
- volatile uint32_t QsdcSimpPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN]; /* offset: 0x4b0 ; repeat: [12] */
- volatile uint32_t QdmcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x4e0 ; repeat: [16] */
- volatile uint32_t QdmcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x520 ; repeat: [16] */
- volatile uint32_t QdmcMemBaseAddr[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x560 ; repeat: [16] */
- volatile uint32_t QdmcMemCcbSizeLog2[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH]; /* offset: 0x5a0 ; repeat: [12] */
- volatile uint32_t QdmcDescCsInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x5d0 ; repeat: [16] */
- volatile uint32_t QdmcBankInterleaveMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x610 ; repeat: [16] */
- volatile uint32_t QdmcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x650 ; repeat: [4] */
- volatile uint32_t QdmcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x660 ; repeat: [4] */
- volatile uint32_t QdmcMemCcbSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x670 ; repeat: [4] */
- volatile uint32_t QdmcDescPeriphInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x680 ; repeat: [4] */
- volatile uint32_t QdmcCcbProcessedIndex[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x690 ; repeat: [4] */
- volatile uint32_t QsmcEnable[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x6a0 ; repeat: [16] */
- volatile uint32_t QsmcReset[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x6e0 ; repeat: [16] */
- volatile uint32_t QsmcMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x720 ; repeat: [16] */
- volatile uint32_t QsmcC2cSel[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x760 ; repeat: [16] */
- volatile uint32_t QsmcAddBurstVal[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x7a0 ; repeat: [16] */
- volatile uint32_t QsmcMemBaseAddr[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x7e0 ; repeat: [16] */
- volatile uint32_t QsmcMemCcbSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x820 ; repeat: [16] */
- volatile uint32_t QsmcPageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x860 ; repeat: [16] */
- volatile uint32_t QsmcSimpPatternNumPages[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x8a0 ; repeat: [16] */
- volatile uint32_t QsmcSimpPatternResiduePageSize[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x8e0 ; repeat: [16] */
- volatile uint32_t QsmcBankInterleaveMode[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH]; /* offset: 0x920 ; repeat: [16] */
- volatile uint32_t QsmcDescPeriphInterrupt[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x960 ; repeat: [4] */
- volatile uint32_t QsmcCcbFreeIndex[DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH]; /* offset: 0x970 ; repeat: [4] */
- volatile uint32_t engine_cs_intr_mask; /* offset: 0x980 ; repeat: [1] */
- volatile uint32_t engine_cs_intr_status; /* offset: 0x984 ; repeat: [1] */
- volatile uint32_t engine_cs_intr_w1c; /* offset: 0x988 ; repeat: [1] */
- volatile uint32_t engine_cs_intr_w1s; /* offset: 0x98c ; repeat: [1] */
- volatile uint32_t engine_ap_intr_mask; /* offset: 0x990 ; repeat: [1] */
- volatile uint32_t engine_ap_intr_status; /* offset: 0x994 ; repeat: [1] */
- volatile uint32_t engine_ap_intr_w1c; /* offset: 0x998 ; repeat: [1] */
- volatile uint32_t engine_ap_intr_w1s; /* offset: 0x99c ; repeat: [1] */
- volatile uint32_t engine_dsp_intr_mask; /* offset: 0x9a0 ; repeat: [1] */
- volatile uint32_t engine_dsp_intr_status; /* offset: 0x9a4 ; repeat: [1] */
- volatile uint32_t engine_dsp_intr_w1c; /* offset: 0x9a8 ; repeat: [1] */
- volatile uint32_t engine_dsp_intr_w1s; /* offset: 0x9ac ; repeat: [1] */
- volatile uint32_t engine_err_intr_mask; /* offset: 0x9b0 ; repeat: [1] */
- volatile uint32_t engine_err_intr_status; /* offset: 0x9b4 ; repeat: [1] */
- volatile uint32_t desc_err_intr_mask; /* offset: 0x9b8 ; repeat: [1] */
- volatile uint32_t desc_err_intr_status; /* offset: 0x9bc ; repeat: [1] */
- volatile uint32_t desc_err_intr_w1c; /* offset: 0x9c0 ; repeat: [1] */
- volatile uint32_t desc_err_intr_w1s; /* offset: 0x9c4 ; repeat: [1] */
- volatile uint32_t qddc_crd_ovf_err_intr_mask; /* offset: 0x9c8 ; repeat: [1] */
- volatile uint32_t qddc_crd_ovf_err_intr_status; /* offset: 0x9cc ; repeat: [1] */
- volatile uint32_t qddc_crd_ovf_err_intr_w1c; /* offset: 0x9d0 ; repeat: [1] */
- volatile uint32_t qddc_crd_ovf_err_intr_w1s; /* offset: 0x9d4 ; repeat: [1] */
- volatile uint32_t qsdc_crd_ovf_err_intr_mask; /* offset: 0x9d8 ; repeat: [1] */
- volatile uint32_t qsdc_crd_ovf_err_intr_status; /* offset: 0x9dc ; repeat: [1] */
- volatile uint32_t qsdc_crd_ovf_err_intr_w1c; /* offset: 0x9e0 ; repeat: [1] */
- volatile uint32_t qsdc_crd_ovf_err_intr_w1s; /* offset: 0x9e4 ; repeat: [1] */
- volatile uint32_t EngErrInterruptSource; /* offset: 0x9e8 ; repeat: [1] */
- volatile uint32_t EngErrRemainPageSize; /* offset: 0x9ec ; repeat: [1] */
- volatile uint32_t EngTransferPageSize; /* offset: 0x9f0 ; repeat: [1] */
- volatile uint32_t VdmaSoftReset; /* offset: 0x9f4 ; repeat: [1] */
- volatile uint32_t vdma_sharedbus; /* offset: 0x9f8 ; repeat: [1] */
- volatile uint32_t cfg_qddc_redundant_en; /* offset: 0x9fc ; repeat: [1] */
- volatile uint32_t cfg_qsdc_redundant_en; /* offset: 0xa00 ; repeat: [1] */
- volatile uint32_t cfg_qdmc_redundant_en; /* offset: 0xa04 ; repeat: [1] */
- volatile uint32_t cfg_qsmc_redundant_en; /* offset: 0xa08 ; repeat: [1] */
- volatile uint32_t qddc_redundant_asf_int_mask; /* offset: 0xa0c ; repeat: [1] */
- volatile uint32_t qddc_redundant_asf_int_status; /* offset: 0xa10 ; repeat: [1] */
- volatile uint32_t qddc_redundant_asf_int_w1c; /* offset: 0xa14 ; repeat: [1] */
- volatile uint32_t qddc_redundant_asf_int_w1s; /* offset: 0xa18 ; repeat: [1] */
- volatile uint32_t qsdc_redundant_asf_int_mask; /* offset: 0xa1c ; repeat: [1] */
- volatile uint32_t qsdc_redundant_asf_int_status; /* offset: 0xa20 ; repeat: [1] */
- volatile uint32_t qsdc_redundant_asf_int_w1c; /* offset: 0xa24 ; repeat: [1] */
- volatile uint32_t qsdc_redundant_asf_int_w1s; /* offset: 0xa28 ; repeat: [1] */
- volatile uint32_t qdmc_redundant_asf_int_mask; /* offset: 0xa2c ; repeat: [1] */
- volatile uint32_t qdmc_redundant_asf_int_status; /* offset: 0xa30 ; repeat: [1] */
- volatile uint32_t qdmc_redundant_asf_int_w1c; /* offset: 0xa34 ; repeat: [1] */
- volatile uint32_t qdmc_redundant_asf_int_w1s; /* offset: 0xa38 ; repeat: [1] */
- volatile uint32_t qsmc_redundant_asf_int_mask; /* offset: 0xa3c ; repeat: [1] */
- volatile uint32_t qsmc_redundant_asf_int_status; /* offset: 0xa40 ; repeat: [1] */
- volatile uint32_t qsmc_redundant_asf_int_w1c; /* offset: 0xa44 ; repeat: [1] */
- volatile uint32_t qsmc_redundant_asf_int_w1s; /* offset: 0xa48 ; repeat: [1] */
- volatile uint32_t PrioIsLp; /* offset: 0xa4c ; repeat: [1] */
- volatile uint32_t ReadLpToQosValue; /* offset: 0xa50 ; repeat: [1] */
- volatile uint32_t ReadHpToQosValue; /* offset: 0xa54 ; repeat: [1] */
- volatile uint32_t WriteLpToQosValue; /* offset: 0xa58 ; repeat: [1] */
- volatile uint32_t WriteHpToQosValue; /* offset: 0xa5c ; repeat: [1] */
- volatile uint32_t DescReadQosValue; /* offset: 0xa60 ; repeat: [1] */
- volatile uint32_t DescWriteQosValue; /* offset: 0xa64 ; repeat: [1] */
- volatile uint32_t vdma_arb; /* offset: 0xa68 ; repeat: [1] */
- volatile uint32_t qm_cfg_cg_delay; /* offset: 0xa6c ; repeat: [1] */
- volatile uint32_t qddc_cfg_cg_bypass; /* offset: 0xa70 ; repeat: [1] */
- volatile uint32_t qsdc_cfg_cg_bypass; /* offset: 0xa74 ; repeat: [1] */
- volatile uint32_t qdmc_cfg_cg_bypass; /* offset: 0xa78 ; repeat: [1] */
- volatile uint32_t qsmc_cfg_cg_bypass; /* offset: 0xa7c ; repeat: [1] */
- volatile uint32_t engine_asf_int_mask; /* offset: 0xa80 ; repeat: [1] */
- volatile uint32_t engine_asf_int_status; /* offset: 0xa84 ; repeat: [1] */
- volatile uint32_t engine_asf_int_w1c; /* offset: 0xa88 ; repeat: [1] */
- volatile uint32_t engine_asf_int_w1s; /* offset: 0xa8c ; repeat: [1] */
- volatile uint32_t engine_rw_parity_bist_mode; /* offset: 0xa90 ; repeat: [1] */
- volatile uint32_t vdma_stop_lp; /* offset: 0xa94 ; repeat: [1] */
- volatile uint32_t vdma_sch; /* offset: 0xa98 ; repeat: [1] */
- volatile uint32_t cfg_src_desc_trace; /* offset: 0xa9c ; repeat: [1] */
- volatile uint32_t cfg_src_desc_trace_base_addr; /* offset: 0xaa0 ; repeat: [1] */
- volatile uint32_t cfg_dst_desc_trace; /* offset: 0xaa4 ; repeat: [1] */
- volatile uint32_t cfg_dst_desc_trace_base_addr; /* offset: 0xaa8 ; repeat: [1] */
- volatile uint32_t cfg_debug_timestamp; /* offset: 0xaac ; repeat: [1] */
- volatile uint32_t debug_timestamp; /* offset: 0xab0 ; repeat: [1] */
- volatile uint32_t auto_address_err_cb_indication; /* offset: 0xab4 ; repeat: [1] */
-} DRAM_DMA_ENGINE_CONFIG_t;
-
-#endif /* DRAM_DMA_ENGINE_CONFIG_REGS_H */
+++ /dev/null
-/*-------------------------------------------------------------------------------------
-// Copyright (c) 2022 by Hailotech This model is the confidential and
-// proprietary property of Hailotech and the possession or use of this
-// file requires a written license from Hailotech.
--------------------------------------------------------------------------------------*/
-
-
-
-#include <stdint.h>
-
-#ifndef DRAM_DMA_PACKAGE_MACROS_H
-#define DRAM_DMA_PACKAGE_MACROS_H
-
-/* HW constants and parameters for package "dram_dma" */
-
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_AXI_QOS_BITS (3)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH (32)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH_RX_CREDIT (4096)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_CH_TX_CREDIT (2048)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DD_DESC (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DESC (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DM_DESC (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_ENHANCED_CH (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_FULL_PATTERN (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_MAX_PATTERNS (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_PATTERNS_MAX_LINES (262144)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_PATTERNS_MAX_PAGES (262144)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH (12)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_RX_SHMIFO (24)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SD_DESC (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SIMP_PATTERN (12)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SM_DESC (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SW_CH (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_SW_INT (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_TX_SHMIFO (20)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__PAGE_SIZE_MAX (13)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__PAGE_SIZE_MAX_8B (10)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_BURST_SIZE (29)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_BURST_SIZE_8B (26)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_C2C_SEL (6)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CCB_DESC_INDEX (18)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CCB_DESC_INDEX_LOG (5)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CFG_DATA (32)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH (5)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_CREDIT_SIZE (10)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_RX_CREDIT (13)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CH_TX_CREDIT (12)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CORE_ADDR (35)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CORE_BASE_ADDR (29)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_CSR_CFG_ADDR (13)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DDR_ADDR (35)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DDR_BASE_ADDR (26)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DD_DESC (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DESC (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DESC_DEMUX_ADDR (43)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DIR_CH (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_DM_DESC (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_ENG_CFG_ADDR (14)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_MAX_PATTERNS (2)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_PATTERNS_MAX_LINES (18)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_PATTERNS_MAX_PAGES (18)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SD_DESC (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SHMIFO (5)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SM_DESC (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_SW_CH (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ADDR (64)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_DATA_DATA (64)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_DATA_DESC (128)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DATA0 (2)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DATA1 (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_AXI_ID_DESC (3)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_CFG_ADDR (10)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_MEM_ADDR (5)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__W_VDMA_MEM_DATA (256)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__ADDR_ALLSTRB_OFFSET (56)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__ADDR_APCMD_OFFSET (55)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__FPGA_N_HW_DMA_ENG (0)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_DESC_AXI (1)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_DMA_ENG (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_HMASTER (4)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_HW_DMA_ENG (3)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_SW_DMA_ENG (1)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_TOT_DMA_DIR_CH (48)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__N_VISION_CH (10)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CFG_ADDR (16)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CFG_DATA (32)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_CSR_CFG_ADDR (12)
-#define DRAM_DMA_PACKAGE__DRAM_DMA_WRAPPER__W_TOT_DMA_DIR_CH (6)
-
-
-#endif /* DRAM_DMA_PACKAGE_MACROS_H */
+++ /dev/null
-/**
- * @file mercury_fields.cpp
- * @brief Contains all memory fields related to mercury
- */
-
-#include "mercury_fields.hpp"
-#include "hw_consts/mercury/dram_dma_engine_config_regs.h"
-
-// Implement our own offsetof to allow access to array
-#define my_offsetof(type,field) ((size_t)(&(((type*)(0))->field)))
-#define dram_dma_offsetof(field) my_offsetof(DRAM_DMA_ENGINE_CONFIG_t, field)
-
-
-static constexpr auto CCB_ADDRESS_SHIFT = 9;
-
-
-QddcField::QddcField() :
- Field("qddc", "Queue dest device channel (qddc)")
-{}
-
-size_t QddcField::elements_count() const
-{
- return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
-}
-
-std::string QddcField::print_element(MemorySource& memory, size_t index) const
-{
- return fmt::format("qddc[{}] enabled={} mode={} shmifo_id={}\n", index,
- is_enabled(memory, index), mode(memory, index), shmifo_id(memory, index));
-}
-
-bool QddcField::is_enabled(MemorySource &memory, size_t index) const
-{
- return (1 == memory.read<uint32_t>(dram_dma_offsetof(QddcEnable[index])));
-}
-
-uint32_t QddcField::shmifo_id(MemorySource &memory, size_t index) const
-{
- return memory.read<uint32_t>(dram_dma_offsetof(QddcShmifoId[index]));
-}
-
-std::string QddcField::mode(MemorySource &memory, size_t index) const
-{
- const auto mode = memory.read<uint32_t>(dram_dma_offsetof(QddcMode[index]));
- switch (mode) {
- case 0: return "CONTINUOUS";
- case 1: return "BURST";
- default:
- return fmt::format("Unknown {}", mode);
- }
-}
-
-QsdcField::QsdcField() :
- Field("qsdc", "Queue source device channel (qsdc)")
-{}
-
-size_t QsdcField::elements_count() const
-{
- return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
-}
-
-std::string QsdcField::print_element(MemorySource& memory, size_t index) const
-{
- return fmt::format("qsdc[{}] enabled={} shmifo_id={}\n", index,
- is_enabled(memory, index), shmifo_id(memory, index));
-}
-
-bool QsdcField::is_enabled(MemorySource &memory, size_t index) const
-{
- return (1 == memory.read<uint32_t>(dram_dma_offsetof(QsdcEnable[index])));
-}
-
-uint32_t QsdcField::shmifo_id(MemorySource &memory, size_t index) const
-{
- return memory.read<uint32_t>(dram_dma_offsetof(QsdcShmifoId[index]));
-}
-
-QdmcField::QdmcField() :
- Field("qdmc", "Queue dest memory channel (qdmc)")
-{}
-
-size_t QdmcField::elements_count() const
-{
- return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
-}
-
-std::string QdmcField::print_element(MemorySource& memory, size_t index) const
-{
- return fmt::format("qdmc[{}] enabled={} address=0x{:x} desc_count={} desc_per_irq={}\n", index,
- is_enabled(memory, index), base_address(memory, index), descriptors_count(memory, index),
- descriptors_per_irq(memory, index));
-}
-
-bool QdmcField::is_enabled(MemorySource &memory, size_t index) const
-{
- return (1 == memory.read<uint32_t>(dram_dma_offsetof(QdmcEnable[index])));
-}
-
-uint64_t QdmcField::base_address(MemorySource &memory, size_t index) const
-{
- const uint64_t address = memory.read<uint32_t>(dram_dma_offsetof(QdmcMemBaseAddr[index]));
- return address << CCB_ADDRESS_SHIFT;
-}
-
-uint32_t QdmcField::descriptors_count(MemorySource &memory, size_t index) const
-{
- if (index > DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH) {
- return memory.read<uint32_t>(dram_dma_offsetof(QdmcMemCcbSize[index - DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_REGULAR_CH]));
- }
- else {
- const auto desc_count_log2 = memory.read<uint32_t>(dram_dma_offsetof(QdmcMemCcbSizeLog2[index]));
- uint32_t size = 1;
- for (uint32_t i = 0; i < desc_count_log2; i++) {
- size <<= 1;
- }
- return size;
- }
-}
-
-uint32_t QdmcField::descriptors_per_irq(MemorySource &memory, size_t index) const
-{
- return memory.read<uint32_t>(dram_dma_offsetof(QdmcDescCsInterrupt[index]));
-}
-
-QsmcField::QsmcField() :
- Field("qsmc", "Queue source memory channel (qsmc)")
-{}
-
-size_t QsmcField::elements_count() const
-{
- return DRAM_DMA_PACKAGE__DRAM_DMA_ENGINE__N_DIR_CH;
-}
-
-std::string QsmcField::print_element(MemorySource& memory, size_t index) const
-{
- return fmt::format("qdmc[{}] mode={} enabled={} address=0x{:x} desc_count={}\n", index,
- mode(memory, index), is_enabled(memory, index), base_address(memory, index), descriptors_count(memory, index));
-}
-
-bool QsmcField::is_enabled(MemorySource &memory, size_t index) const
-{
- return (1 == memory.read<uint32_t>(dram_dma_offsetof(QsmcEnable[index])));
-}
-
-uint64_t QsmcField::base_address(MemorySource &memory, size_t index) const
-{
- const uint64_t address = memory.read<uint32_t>(dram_dma_offsetof(QsmcMemBaseAddr[index]));
- return address << CCB_ADDRESS_SHIFT;
-}
-
-uint32_t QsmcField::descriptors_count(MemorySource &memory, size_t index) const
-{
- const auto desc_count = memory.read<uint32_t>(dram_dma_offsetof(QsmcMemCcbSize[index]));
- return desc_count + 1; // The reg contains desc_count-1
-}
-
-std::string QsmcField::mode(MemorySource &memory, size_t index) const
-{
- const auto mode = memory.read<uint32_t>(dram_dma_offsetof(QsmcMode[index]));
- switch (mode) {
- case 0: return "CONTINUOUS";
- case 2: return "BURST";
- case 3: // C2C mode
- {
- auto c2c_sel = memory.read<uint32_t>(dram_dma_offsetof(QsmcC2cSel[index]));
- return fmt::format("C2C (from {})", c2c_sel);
- }
- default:
- return fmt::format("Unknown {}", mode);
- }
-}
+++ /dev/null
-/**
- * @file mercury_fields.hpp
- * @brief Contains all memory fields related to mercury
- */
-
-#ifndef _HW_DEBUG_MERCURY_FIELDS_H_
-#define _HW_DEBUG_MERCURY_FIELDS_H_
-
-#include "memory_commands.hpp"
-
-
-class QddcField : public Field {
-public:
- QddcField();
-
- virtual size_t elements_count() const override;
- virtual std::string print_element(MemorySource& memory, size_t index) const override;
-
-private:
- bool is_enabled(MemorySource &memory, size_t index) const;
- uint32_t shmifo_id(MemorySource &memory, size_t index) const;
- std::string mode(MemorySource &memory, size_t index) const;
-};
-
-class QsdcField : public Field {
-public:
- QsdcField();
-
- virtual size_t elements_count() const override;
- virtual std::string print_element(MemorySource& memory, size_t index) const override;
-
-private:
- bool is_enabled(MemorySource &memory, size_t index) const;
- uint32_t shmifo_id(MemorySource &memory, size_t index) const;
-};
-
-
-class QdmcField : public Field {
-public:
- QdmcField();
-
- virtual size_t elements_count() const override;
- virtual std::string print_element(MemorySource& memory, size_t index) const override;
-
-private:
- bool is_enabled(MemorySource &memory, size_t index) const;
- uint64_t base_address(MemorySource &memory, size_t index) const;
- uint32_t descriptors_count(MemorySource &memory, size_t index) const;
- uint32_t descriptors_per_irq(MemorySource &memory, size_t index) const;
-};
-
-class QsmcField : public Field {
-public:
- QsmcField();
-
- virtual size_t elements_count() const override;
- virtual std::string print_element(MemorySource& memory, size_t index) const override;
-
-private:
- bool is_enabled(MemorySource &memory, size_t index) const;
- uint64_t base_address(MemorySource &memory, size_t index) const;
- uint32_t descriptors_count(MemorySource &memory, size_t index) const;
- std::string mode(MemorySource &memory, size_t index) const;
-};
-
-#endif /* _HW_DEBUG_MERCURY_FIELDS_H_ */