This patch implements network create/destroy operations in the VD NPU Manager: D-Bus handlers in the daemon, the client-side API stubs, and the triv2 device backend.
Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
" <method name='ContextDestroy'>"
" <arg type='t' name='context' direction='in'/>"
" </method>"
+ " <method name='NetworkCreate'>"
+ " <arg type='t' name='ctx_handle' direction='in'/>"
+ " <arg type='i' name='num_files' direction='in'/>"
+ " <arg type='at' name='input_files' direction='in'/>"
+ " <arg type='i' name='in_buffer_type' direction='in'/>"
+ " <arg type='i' name='in_tensor_cnt' direction='in'/>"
+ " <arg type='v' name='in_tensor_names' direction='in'/>"
+ " <arg type='i' name='out_buffer_type' direction='in'/>"
+ " <arg type='i' name='out_tensor_cnt' direction='in'/>"
+ " <arg type='v' name='out_tensor_names' direction='in'/>"
+ " <arg type='t' name='nw_handle' direction='out'/>"
+ " </method>"
+ " <method name='NetworkDestroy'>"
+ " <arg type='t' name='ctx_handle' direction='in'/>"
+ " <arg type='t' name='nw_handle' direction='in'/>"
+ " </method>"
" </interface>"
" </node>";
}
/**
+ * @brief Thread-safe lookup of an npumgr context by its handle.
+ *
+ * Returns the registered NpumgrContext, or NULL when the handle is unknown.
+ */
+static NpumgrContext *
+find_context (npumgr_context handle) {
+  gpointer entry;
+
+  G_LOCK (mutex);
+  entry = g_hash_table_lookup (ctx_table, GSIZE_TO_POINTER (handle));
+  G_UNLOCK (mutex);
+
+  return (NpumgrContext *) entry;
+}
+
+/**
* @brief Method callback
*/
static void
G_DBUS_ERROR_INVALID_ARGS,
"Invalid arguments detected");
}
+ } else if (g_strcmp0 (method_name, "NetworkCreate") == 0) {
+ /* Handle NetworkCreate: unpack "(tiatiiviiv)", resolve the context,
+ * collect the model-file fds passed out-of-band, and forward the
+ * request to the device backend. */
+ npumgr_context ctx_handle = 0;
+ int num_files;
+ GVariantIter *input_files_iter;
+ npumgr_buffer_t in_buffer_type, out_buffer_type;
+ int in_tensor_cnt, out_tensor_cnt;
+ GVariant *in_tensor_names_var;
+ GVariant *out_tensor_names_var;
+
+ g_variant_get (parameters, "(tiatiiviiv)", &ctx_handle, &num_files,
+ &input_files_iter, &in_buffer_type, &in_tensor_cnt,
+ &in_tensor_names_var, &out_buffer_type, &out_tensor_cnt,
+ &out_tensor_names_var);
+
+ if (ctx_handle > 0 && num_files > 0 && in_tensor_cnt < NPUMGR_MAX_DIMS &&
+ out_tensor_cnt < NPUMGR_MAX_DIMS) {
+ NpumgrContext *context = find_context (ctx_handle);
+ if (context == NULL) {
+ /* NOTE(review): this early return leaks input_files_iter and the
+ * two GVariant refs (in/out tensor names) unpacked above — they
+ * are only released on the success path below. */
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_FAILED,
+ "Unable to add network");
+ return;
+ }
+ NpumgrDevice *device = context->device;
+ GDBusMessage *message = g_dbus_method_invocation_get_message (invocation);
+ GUnixFDList *fd_list = g_dbus_message_get_unix_fd_list (message);
+
+ std::vector<npumgr_network_defn> input_files_vec;
+ npumgr_network_defn input_file;
+ /* NOTE(review): g_unix_fd_list_get() is always called with index 0, so
+ * with num_files > 1 every entry receives the first fd — the loop
+ * index should be used instead. Also, iterating with "t" into a
+ * npumgr_network_defn reinterprets a 64-bit value as the struct,
+ * which is only correct if sizeof (npumgr_network_defn) == 8 —
+ * TODO confirm (matches the client-side packing). */
+ while (g_variant_iter_loop (input_files_iter, "t", &input_file)) {
+ input_file.fd = g_unix_fd_list_get (fd_list, 0, NULL);
+ input_files_vec.push_back (input_file);
+ }
+ g_variant_iter_free (input_files_iter);
+
+ gchar **in_tensor_names = g_variant_dup_strv (in_tensor_names_var, NULL);
+ gchar **out_tensor_names =
+ g_variant_dup_strv (out_tensor_names_var, NULL);
+
+ npumgr_network nw_handle;
+ npumgr_status_e status;
+
+ /* delegate actual network creation to the device backend vtable */
+ status = NPUMGR_DEVICE_GET_CLASS (device)->network_create (
+ device, ctx_handle, num_files, input_files_vec.data (),
+ in_buffer_type, in_tensor_cnt, in_tensor_names, out_buffer_type,
+ out_tensor_cnt, out_tensor_names, &nw_handle);
+
+ g_strfreev (in_tensor_names);
+ g_strfreev (out_tensor_names);
+ g_variant_unref (in_tensor_names_var);
+ g_variant_unref (out_tensor_names_var);
+
+ if (status == NPUMGR_STATUS_SUCCESS) {
+ g_dbus_method_invocation_return_value (
+ invocation, g_variant_new ("(t)", nw_handle));
+ } else {
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_FAILED,
+ "Unable to add network");
+ }
+ } else {
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_INVALID_ARGS,
+ "Invalid arguments detected");
+ }
+ } else if (g_strcmp0 (method_name, "NetworkDestroy") == 0) {
+ /* Handle NetworkDestroy: validate both handles, then delegate. */
+ npumgr_context ctx_handle = 0;
+ npumgr_network nw_handle = 0;
+
+ /* NOTE(review): the (guint64 *) casts assume npumgr_context and
+ * npumgr_network are 64-bit typedefs — verify against the header. */
+ g_variant_get (parameters, "(tt)", (guint64 *) &ctx_handle,
+ (guint64 *) &nw_handle);
+ if (ctx_handle > 0 && nw_handle > 0) {
+ NpumgrContext *context = find_context (ctx_handle);
+ if (context != NULL) {
+ NpumgrDevice *device = context->device;
+ npumgr_status_e status;
+
+ status = NPUMGR_DEVICE_GET_CLASS (device)->network_destroy (
+ device, ctx_handle, nw_handle);
+ if (status == NPUMGR_STATUS_SUCCESS)
+ g_dbus_method_invocation_return_value (invocation, NULL);
+ else
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_UNKNOWN_OBJECT,
+ "Unable to find the network");
+ } else {
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_UNKNOWN_OBJECT,
+ "Unable to find the context");
+ }
+ } else {
+ g_dbus_method_invocation_return_error (invocation, G_DBUS_ERROR,
+ G_DBUS_ERROR_INVALID_ARGS,
+ "Invalid arguments detected");
+ }
}
}
#include "npumgr_api.h"
+#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <gio/gio.h>
+#include <gio/gunixfdlist.h>
/** 1 second timeout */
#define TIMEOUT 1
npumgr_buffer_t out_buffer_type, int out_tensor_cnt,
const char *const *out_tensor_names,
npumgr_network *out_nw_handle) {
- /* NYI */
- return NPUMGR_STATUS_SUCCESS;
+ /* Marshal the NetworkCreate request over D-Bus, passing the model-file
+ * fds out-of-band via a GUnixFDList, and unpack the returned network
+ * handle into *out_nw_handle. */
+ GDBusMessage *method_call;
+ GDBusMessage *method_reply;
+ npumgr_status_e status = NPUMGR_STATUS_SUCCESS;
+ GError *error = NULL;
+
+ g_return_val_if_fail (wait_until_connected (), NPUMGR_STATUS_ERR_TIMEOUT);
+
+ method_call = g_dbus_message_new_method_call (
+ _name_owner, "/sr/odl/NPUManager/APIObject", "sr.odl.NPUManager.API",
+ "NetworkCreate");
+
+ GVariantBuilder builder, builder_arr;
+ GUnixFDList *fd_list;
+
+ g_variant_builder_init (&builder, G_VARIANT_TYPE ("(tiatiiviiv)"));
+ g_variant_builder_init (&builder_arr, G_VARIANT_TYPE ("at"));
+
+ fd_list = g_unix_fd_list_new ();
+
+ /* build message body */
+ g_variant_builder_add (&builder, "t", ctx);
+ g_variant_builder_add (&builder, "i", num_files);
+ for (int i = 0; i < num_files; i++) {
+ g_unix_fd_list_append (fd_list, input_files[i].fd, NULL);
+ /* NOTE(review): ((uint64_t *) input_files)[i] reinterprets the i-th
+ * 64 bits of the defn array rather than element i's value — only
+ * correct if sizeof (npumgr_network_defn) == 8; TODO confirm. */
+ g_variant_builder_add (&builder_arr, "t", ((uint64_t *) input_files)[i]);
+ }
+ g_dbus_message_set_unix_fd_list (method_call, fd_list);
+
+ g_variant_builder_add_value (&builder, g_variant_builder_end (&builder_arr));
+ g_variant_builder_add (&builder, "i", in_buffer_type);
+ g_variant_builder_add (&builder, "i", in_tensor_cnt);
+ g_variant_builder_add (&builder, "v",
+ g_variant_new_strv (in_tensor_names, in_tensor_cnt));
+ g_variant_builder_add (&builder, "i", out_buffer_type);
+ g_variant_builder_add (&builder, "i", out_tensor_cnt);
+ g_variant_builder_add (&builder, "v",
+ g_variant_new_strv (out_tensor_names, out_tensor_cnt));
+
+ g_dbus_message_set_body (method_call, g_variant_builder_end (&builder));
+ method_reply = g_dbus_connection_send_message_with_reply_sync (
+ _connection, method_call, G_DBUS_SEND_MESSAGE_FLAGS_NONE, -1, NULL, NULL,
+ &error);
+
+ if (method_reply == NULL) {
+ /* NOTE(review): on this path `error` (if set by the failed send) leaks,
+ * and the unref of method_reply at out: below operates on NULL, which
+ * g_object_unref() does not permit — guard the unref and add
+ * g_clear_error() at the out: label. */
+ status = NPUMGR_STATUS_ERR_FAIL;
+ goto out;
+ }
+
+ if (g_dbus_message_get_message_type (method_reply) ==
+ G_DBUS_MESSAGE_TYPE_ERROR) {
+ g_dbus_message_to_gerror (method_reply, &error);
+ g_critical ("error: %s\n", error->message);
+ g_error_free (error);
+ status = NPUMGR_STATUS_ERR_FAIL;
+ goto out;
+ }
+
+ g_variant_get (g_dbus_message_get_body (method_reply), "(t)", out_nw_handle);
+
+out:
+ g_object_unref (method_call);
+ g_object_unref (method_reply);
+
+ return status;
}
/**
*/
npumgr_status_e
npumgr_network_destroy (npumgr_context ctx, npumgr_network nw_handle) {
-  /* NYI */
-  return NPUMGR_STATUS_SUCCESS;
+  /* Send a synchronous NetworkDestroy request over D-Bus for the given
+   * context/network handle pair.
+   *
+   * Returns NPUMGR_STATUS_SUCCESS on success, NPUMGR_STATUS_ERR_TIMEOUT if
+   * the daemon connection is unavailable, NPUMGR_STATUS_ERR_FAIL on any
+   * transport or daemon-side failure. */
+  GDBusMessage *method_call;
+  GDBusMessage *method_reply = NULL;
+  npumgr_status_e status = NPUMGR_STATUS_SUCCESS;
+  GError *error = NULL;
+
+  g_return_val_if_fail (wait_until_connected (), NPUMGR_STATUS_ERR_TIMEOUT);
+
+  method_call = g_dbus_message_new_method_call (
+      _name_owner, "/sr/odl/NPUManager/APIObject", "sr.odl.NPUManager.API",
+      "NetworkDestroy");
+  g_dbus_message_set_body (method_call, g_variant_new ("(tt)", ctx, nw_handle));
+
+  method_reply = g_dbus_connection_send_message_with_reply_sync (
+      _connection, method_call, G_DBUS_SEND_MESSAGE_FLAGS_NONE, -1, NULL, NULL,
+      &error);
+
+  if (method_reply == NULL) {
+    /* transport failure; any GError set is released at out: */
+    status = NPUMGR_STATUS_ERR_FAIL;
+    goto out;
+  }
+
+  if (g_dbus_message_get_message_type (method_reply) ==
+      G_DBUS_MESSAGE_TYPE_ERROR) {
+    g_dbus_message_to_gerror (method_reply, &error);
+    g_critical ("error: %s\n", error->message);
+    status = NPUMGR_STATUS_ERR_FAIL;
+    goto out;
+  }
+
+out:
+  /* g_clear_error() handles both the set and the NULL case; method_reply
+   * may be NULL here, and g_object_unref() must not receive NULL */
+  g_clear_error (&error);
+  g_object_unref (method_call);
+  if (method_reply != NULL)
+    g_object_unref (method_reply);
+
+  return status;
}
/**
int
main (int argc, char **argv) {
npumgr_context ctx;
+ npumgr_network nw_handle;
npumgr_devices_id list;
npumgr_status_e status;
- int ret = -EINVAL;
+ int fd, ret = -EINVAL;
+
+ /* the single argument is the path of the network (model) file */
+ if (argc != 2) {
+ std::cerr << "Please provide network file name\n";
+ return ret;
+ }
status = npumgr_device_get_available_list (&list);
if (status != NPUMGR_STATUS_SUCCESS) {
return ret;
}
+ /* NOTE(review): `ctx` is presumably initialized by npumgr_context_create
+ * in context lines omitted from this hunk — confirm; otherwise it is
+ * used uninitialized below. Also open()/O_RDONLY require <fcntl.h>;
+ * verify it is included in this file. */
+ fd = open (argv[1], O_RDONLY);
+ if (fd >= 0) {
+ /* exercise create/destroy round trip with a single TVN model file */
+ npumgr_network_defn input_files[] = {{NPUMGR_NETWORK_FILE_TVN, fd}};
+ const char *input_tensor_names[] = {"input"};
+ const char *output_tensor_names[] = {"output"};
+
+ status = npumgr_network_create (
+ ctx, 1, input_files, NPUMGR_BUF_TYPE_DRIVER, 1, input_tensor_names,
+ NPUMGR_BUF_TYPE_DRIVER, 1, output_tensor_names, &nw_handle);
+ if (status != NPUMGR_STATUS_SUCCESS) {
+ std::cerr << "Unable to create a npumgr network, " << status << "\n";
+ goto out;
+ }
+
+ status = npumgr_network_destroy (ctx, nw_handle);
+ if (status != NPUMGR_STATUS_SUCCESS) {
+ std::cerr << "Unable to destroy the npumgr network, " << status << "\n";
+ goto out;
+ }
+
+ ret = 0;
+ } else {
+ std::cerr << "Unable to open file " << argv[1] << "\n";
+ }
+
+out:
+ if (fd >= 0)
+ close (fd);
+
status = npumgr_context_destroy (ctx);
if (status != NPUMGR_STATUS_SUCCESS) {
std::cerr << "Unable to destroy the npumgr context, " << status << "\n";
}
/**
+ * @brief Class for a triv2 npumgr network (one loaded model)
+ *
+ * Owns the model file descriptor (closed in the destructor), the parsed
+ * npubin metadata, and deep copies of the tensor-name arrays.
+ */
+class NpumgrNetworkTriv2 {
+ public:
+  /** @brief Take ownership of the model file defn (including its fd) */
+  NpumgrNetworkTriv2 (const npumgr_network_defn &model_file)
+      : model_file_ (model_file),
+        in_buffer_type_ (NPUMGR_BUF_TYPE_MAX),
+        out_buffer_type_ (NPUMGR_BUF_TYPE_MAX),
+        in_tensor_names_ (NULL),
+        out_tensor_names_ (NULL) {
+    /* atomically allocate a unique handle for this network */
+    handle_ = g_atomic_int_add (&g_nw_handle, 1);
+    meta_ = (npubin_meta *) g_new0 (npubin_meta, 1);
+  }
+  /** @brief Release fd, tensor-name copies, and metadata */
+  ~NpumgrNetworkTriv2 () {
+    close (model_file_.fd);
+    g_strfreev (in_tensor_names_);
+    g_strfreev (out_tensor_names_);
+    g_free (meta_);
+  }
+
+  /** @brief set input buffer type */
+  void setInBufferType (npumgr_buffer_t type) { in_buffer_type_ = type; }
+  /** @brief set input tensor names (deep copy) */
+  void setInTensorNames (gchar **in_tensor_names) {
+    in_tensor_names_ = g_strdupv (in_tensor_names);
+  }
+  /** @brief read the npubin metadata header from the model fd */
+  gboolean loadModel () {
+    return read (model_file_.fd, (void *) meta_, NPUBIN_META_SIZE) ==
+           NPUBIN_META_SIZE;
+  }
+
+  /** @brief set output buffer type */
+  void setOutBufferType (npumgr_buffer_t type) { out_buffer_type_ = type; }
+  /** @brief set output tensor names (deep copy) */
+  void setOutTensorNames (gchar **out_tensor_names) {
+    out_tensor_names_ = g_strdupv (out_tensor_names);
+  }
+
+  /** @brief get input tensor name */
+  const gchar *getInTensorName (gint idx) { return in_tensor_names_[idx]; }
+  /** @brief get output tensor name */
+  const gchar *getOutTensorName (gint idx) { return out_tensor_names_[idx]; }
+
+  /** @brief input tensor count from metadata (valid after loadModel) */
+  guint getInTensorCnt () { return meta_->input_seg_num; }
+  /** @brief output tensor count from metadata (valid after loadModel) */
+  guint getOutTensorCnt () { return meta_->output_seg_num; }
+
+  /** @brief get network handle */
+  npumgr_network getHandle () { return handle_; }
+
+  /** @brief dimension j of input tensor i */
+  guint getInTensorDim (guint i, guint j) {
+    return meta_->input_seg_dims[i][j];
+  }
+  /** @brief element count of input tensor i (product over all ranks) */
+  guint getInTensorSize (guint i) {
+    guint size = 1;
+    for (guint j = 0; j < MAX_RANK; j++) size *= getInTensorDim (i, j);
+    return size;
+  }
+  /** @brief quantization zero point of input tensor i */
+  int32_t getInTensorQuantZero (guint i) { return meta_->input_seg_quant_z[i]; }
+  /** @brief quantization scale of input tensor i */
+  float getInTensorQuantScale (guint i) { return meta_->input_seg_quant_s[i]; }
+
+  /** @brief dimension j of output tensor i */
+  guint getOutTensorDim (gint i, gint j) {
+    return meta_->output_seg_dims[i][j];
+  }
+  /** @brief element count of output tensor i (product over all ranks) */
+  guint getOutTensorSize (guint i) {
+    guint size = 1;
+    for (guint j = 0; j < MAX_RANK; j++) size *= getOutTensorDim (i, j);
+    return size;
+  }
+  /** @brief quantization zero point of output tensor i */
+  int32_t getOutTensorQuantZero (gint i) {
+    return meta_->output_seg_quant_z[i];
+  }
+  /** @brief quantization scale of output tensor i */
+  float getOutTensorQuantScale (gint i) { return meta_->output_seg_quant_s[i]; }
+
+ private:
+  static volatile guint g_nw_handle; /**< next handle value (atomic source) */
+
+  npumgr_network handle_; /**< unique handle of this network */
+
+  npumgr_network_defn model_file_; /**< model file defn; fd owned here */
+  npubin_meta *meta_;              /**< parsed npubin metadata */
+
+  npumgr_buffer_t in_buffer_type_;  /**< requested input buffer type */
+  npumgr_buffer_t out_buffer_type_; /**< requested output buffer type */
+
+  gchar **in_tensor_names_;  /**< deep-copied input tensor names */
+  gchar **out_tensor_names_; /**< deep-copied output tensor names */
+};
+
+/**
+ * @brief GDestroyNotify for nw_table_ values: deletes the stored network.
+ */
+static void
+nw_destroy (gpointer data) {
+  delete static_cast<NpumgrNetworkTriv2 *> (data);
+}
+
+/**
* @brief Class for triv2 npumgr context
*/
class NpumgrContextTriv2 {
NpumgrContextTriv2 (npudev_h dev, int priority)
: dev_ (dev), priority_ (priority) {
handle_ = g_atomic_int_add (&g_ctx_handle, 1);
+ /* nw_table_ owns its values: removal/destroy calls nw_destroy, which
+ * deletes the NpumgrNetworkTriv2 instance */
+ nw_table_ =
+ g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL, nw_destroy);
+ }
- ~NpumgrContextTriv2 () {}
+ ~NpumgrContextTriv2 () { g_hash_table_destroy (nw_table_); }
+
+ /* NOTE(review): g_hash_table_insert() returns FALSE when the key already
+ * existed, but it still stores the new value (destroying the old one) —
+ * so even on FALSE the table owns `nw` and callers must not delete it. */
+ gboolean appendNetwork (npumgr_network handle, NpumgrNetworkTriv2 *nw) {
+ return g_hash_table_insert (nw_table_, GSIZE_TO_POINTER (handle), nw);
+ }
+ /* removal triggers nw_destroy, which deletes the network instance */
+ gboolean removeNetwork (npumgr_network handle) {
+ return g_hash_table_remove (nw_table_, GSIZE_TO_POINTER (handle));
+ }
npumgr_context getHandle () { return handle_; }
npumgr_context handle_;
npudev_h dev_;
int priority_;
+
+ GHashTable *nw_table_; /* handle -> NpumgrNetworkTriv2* (owned) */
};
static void
}
volatile guint NpumgrContextTriv2::g_ctx_handle = 1;
+volatile guint NpumgrNetworkTriv2::g_nw_handle = 1;
/**
* @brief Private members in NpumgrDeviceTriv2
npumgr_context *out_ctx);
static npumgr_status_e triv2_context_destroy (NpumgrDevice *device,
npumgr_context ctx);
+static npumgr_status_e triv2_network_create (
+ NpumgrDevice *device, npumgr_context ctx, int num_files,
+ npumgr_network_defn *input_files, npumgr_buffer_t in_buffer_type,
+ int in_tensor_cnt, char **input_tensor_names,
+ npumgr_buffer_t out_buffer_type, int out_tensor_cnt,
+ char **output_tensor_names, npumgr_network *out_nw_handle);
+static npumgr_status_e triv2_network_destroy (NpumgrDevice *device,
+ npumgr_context ctx,
+ npumgr_network nw_handle);
extern NpumgrDevice *
npumgr_device_triv2_new (void) {
npumgr_device_class->device_get_capabilities = triv2_device_get_capabilities;
npumgr_device_class->context_create = triv2_context_create;
npumgr_device_class->context_destroy = triv2_context_destroy;
+ npumgr_device_class->network_create = triv2_network_create;
+ npumgr_device_class->network_destroy = triv2_network_destroy;
/* NYI */
}
return result;
}
+/**
+ * @brief Thread-safe lookup of a triv2 context by handle.
+ */
+static NpumgrContextTriv2 *
+find_context (NpumgrDeviceTriv2Private *priv, npumgr_context handle) {
+  gpointer entry;
+
+  g_mutex_lock (&priv->mutex);
+  entry = g_hash_table_lookup (priv->ctx_table, GSIZE_TO_POINTER (handle));
+  g_mutex_unlock (&priv->mutex);
+
+  return static_cast<NpumgrContextTriv2 *> (entry);
+}
+
static npumgr_status_e
triv2_context_create (NpumgrDevice *device, int device_id, int priority,
npumgr_context *ctx_handle) {
else
return NPUMGR_STATUS_ERR_CTX_INVALID;
}
+
+/**
+ * @brief Create a triv2 network from a single model file and register it
+ *        in the owning context.
+ *
+ * On success the created network is owned by the context's table and its
+ * handle is returned via @out_nw; on failure the model fd is closed with
+ * the discarded network object.
+ */
+static npumgr_status_e
+triv2_network_create (NpumgrDevice *device, npumgr_context ctx, int num_files,
+                      npumgr_network_defn *input_files,
+                      npumgr_buffer_t in_buffer_type, int in_tensor_cnt,
+                      char **input_tensor_names,
+                      npumgr_buffer_t out_buffer_type, int out_tensor_cnt,
+                      char **output_tensor_names, npumgr_network *out_nw) {
+  g_return_val_if_fail (device != NULL, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (ctx != 0, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  /* triv2 models are shipped as exactly one file */
+  g_return_val_if_fail (num_files == 1, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (input_files != NULL, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  /* reject zero AND negative counts (original only checked != 0) */
+  g_return_val_if_fail (in_tensor_cnt > 0, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (input_tensor_names != NULL,
+                        NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (out_tensor_cnt > 0, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (output_tensor_names != NULL,
+                        NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (out_nw != NULL, NPUMGR_STATUS_ERR_PARAM_INVALID);
+
+  NpumgrDeviceTriv2 *self = NPUMGR_DEVICE_TRIV2 (device);
+  NpumgrDeviceTriv2Private *priv = NPUMGR_DEVICE_TRIV2_GET_PRIVATE (self);
+  NpumgrContextTriv2 *context = find_context (priv, ctx);
+
+  g_return_val_if_fail (context != NULL, NPUMGR_STATUS_ERR_CTX_INVALID);
+
+  /* the network takes ownership of input_files[0], including its fd */
+  NpumgrNetworkTriv2 *network = new NpumgrNetworkTriv2 (input_files[0]);
+
+  /* validate the model header against the caller-provided counts; the
+   * (guint) casts avoid signed/unsigned comparison (counts are > 0 here) */
+  if (!network->loadModel () ||
+      network->getInTensorCnt () != (guint) in_tensor_cnt ||
+      network->getOutTensorCnt () != (guint) out_tensor_cnt) {
+    delete network;
+    return NPUMGR_STATUS_ERR_FAIL;
+  }
+
+  network->setInTensorNames (input_tensor_names);
+  network->setOutTensorNames (output_tensor_names);
+  network->setInBufferType (in_buffer_type);
+  network->setOutBufferType (out_buffer_type);
+
+  if (!context->appendNetwork (network->getHandle (), network)) {
+    /* Duplicate handle: g_hash_table_insert() still stored `network`
+     * (destroying the previous entry), so the table owns it now —
+     * deleting it here would leave a dangling pointer in the table. */
+    return NPUMGR_STATUS_ERR_FAIL;
+  }
+
+  *out_nw = network->getHandle ();
+  return NPUMGR_STATUS_SUCCESS;
+}
+
+/**
+ * @brief Remove a network from its owning triv2 context.
+ *
+ * Removal from the table invokes the destroy notify, which deletes the
+ * network instance.
+ */
+static npumgr_status_e
+triv2_network_destroy (NpumgrDevice *device, npumgr_context ctx_handle,
+                       npumgr_network nw_handle) {
+  g_return_val_if_fail (device != NULL, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (ctx_handle != 0, NPUMGR_STATUS_ERR_PARAM_INVALID);
+  g_return_val_if_fail (nw_handle != 0, NPUMGR_STATUS_ERR_PARAM_INVALID);
+
+  NpumgrDeviceTriv2 *self = NPUMGR_DEVICE_TRIV2 (device);
+  NpumgrDeviceTriv2Private *priv = NPUMGR_DEVICE_TRIV2_GET_PRIVATE (self);
+  NpumgrContextTriv2 *ctx = find_context (priv, ctx_handle);
+
+  g_return_val_if_fail (ctx != NULL, NPUMGR_STATUS_ERR_CTX_INVALID);
+
+  return ctx->removeNetwork (nw_handle) ? NPUMGR_STATUS_SUCCESS
+                                        : NPUMGR_STATUS_ERR_FAIL;
+}