#include <memory>
#include <string>
+#include <map>
#include <beyond/platform/beyond_platform.h>
#include <beyond/private/beyond_private.h>
char payload[1];
};
+ static const std::map<std::string, int> sizeMap;
+
private:
Peer(void);
virtual ~Peer(void);
static void ConfigureImageInput(beyond_input_config *config, std::ostringstream &client_format, std::ostringstream &server_format);
static void ConfigureVideoInput(beyond_input_config *config, std::ostringstream &client_format, std::ostringstream &server_format);
+ static beyond_tensor_info *AllocInputTensorInfo(beyond_input_config *config);
static void ResetInfo(beyond_peer_info *&info);
static void ResetRuntime(beyond_peer_info_runtime *&runtimes, int count_of_runtimes);
static beyond_plugin_peer_nn_config *DuplicateConfig(const beyond_plugin_peer_nn_config *config);
static void FreeConfig(beyond_plugin_peer_nn_config *&config);
+ static void FreeInputTensorInfo(beyond_tensor_info *&info);
+
private:
std::unique_ptr<ServerContext> serverCtx;
std::unique_ptr<ClientContext> clientCtx;
beyond_plugin_peer_nn_config *reservedConfiguration;
beyond::AuthenticatorInterface *authenticator;
beyond::AuthenticatorInterface *caAuthenticator;
+
+ beyond_tensor_info *configuredInputTensorInfo;
};
#endif // __BEYOND_PEER_NN_PEER_H__
#include <unistd.h>
#define DEFAULT_FRAMEWORK "tensorflow-lite"
+#define CONFIGURED_INPUT_TENSOR_SIZE 1
+#define CONFIGURED_INPUT_TENSOR_DIMS 2
+
+// TODO:
+// We have to update the sizeMap with more precise values for each of image format
+// Upper-bound bytes-per-pixel for every pixel format the peer accepts,
+// used to size the configured input tensor buffer. The YUV entries are
+// deliberately conservative (3 bytes is always enough to hold a YUV pixel).
+const std::map<std::string, int> Peer::sizeMap = {
+    { "I420", 3 },  // 640x480(Y), 320x240(U), 320x240(U) per a pixel
+    { "YV12", 3 },
+    { "YUY2", 3 },
+    { "UYVY", 3 },
+    { "Y41B", 3 },
+    { "Y42B", 3 },
+    { "YVYU", 3 },
+    { "Y444", 3 },
+    { "NV21", 3 },
+    { "NV12", 3 },
+    { "RGB", 3 },   // three 8-bit channels
+    { "BGR", 3 },
+    { "RGBx", 4 },  // three channels plus one padding byte
+    { "xRGB", 4 },
+    { "BGRx", 4 },
+    { "xBGR", 4 },
+    { "GRAY8", 1 }  // single 8-bit luminance channel
+};
Peer *Peer::Create(bool isServer, const char *framework, const char *accel, const char *storagePath)
{
eventObject = nullptr;
ResetInfo(info);
+ FreeConfig(reservedConfiguration);
+ FreeInputTensorInfo(configuredInputTensorInfo);
delete this;
}
server_format << " ! queue leaky=2 max-size-buffers=1";
}
+// @brief Allocate a single-element tensor info describing the configured
+//        image/video input as one UINT8 tensor of width * height * bpp bytes.
+// @param config input configuration; its image (or video frame) sub-config
+//        must carry a non-null format string found in sizeMap
+// @return newly malloc'ed tensor info to be released with
+//         FreeInputTensorInfo(), or nullptr on unknown input type,
+//         missing/unsupported format, or allocation failure (errno is set
+//         by the failing C library call in the latter case)
+beyond_tensor_info *Peer::AllocInputTensorInfo(beyond_input_config *config)
+{
+    const beyond_input_image_config *imageConfig = nullptr;
+
+    // Both image and video inputs are described by the same per-frame
+    // image configuration structure.
+    if (config->input_type == BEYOND_INPUT_TYPE_IMAGE) {
+        imageConfig = &config->config.image;
+    } else if (config->input_type == BEYOND_INPUT_TYPE_VIDEO) {
+        imageConfig = &config->config.video.frame;
+    } else {
+        ErrPrint("Unknown input type configuration");
+        return nullptr;
+    }
+
+    if (imageConfig->format == nullptr) {
+        ErrPrint("Format is not specified");
+        return nullptr;
+    }
+
+    // Map the pixel format string to its bytes-per-pixel upper bound.
+    std::map<std::string, int>::const_iterator it = sizeMap.find(imageConfig->format);
+    if (it == sizeMap.end()) {
+        ErrPrint("Unsupported format [%s]", imageConfig->format);
+        return nullptr;
+    }
+
+    beyond_tensor_info *info = static_cast<beyond_tensor_info *>(malloc(sizeof(beyond_tensor_info) * CONFIGURED_INPUT_TENSOR_SIZE));
+    if (info == nullptr) {
+        ErrPrintCode(errno, "malloc");
+        return nullptr;
+    }
+
+    // NOTE(review): width * height * bpp is computed in int; could overflow
+    // for very large frames — TODO confirm the allowed frame dimensions.
+    info->size = imageConfig->width * imageConfig->height * it->second;
+    info->type = BEYOND_TENSOR_TYPE_UINT8;
+    // The tensor is named after its pixel format; the copy is owned by info
+    // and released in FreeInputTensorInfo().
+    info->name = strdup(imageConfig->format);
+    if (info->name == nullptr) {
+        ErrPrintCode(errno, "strdup");
+        free(info);
+        return nullptr;
+    }
+
+    // NOTE:
+    // WIDTH and HEIGHT, dimension size is always 2
+    info->dims = static_cast<beyond_tensor_info::dimensions *>(malloc(sizeof(beyond_tensor_info::dimensions) + sizeof(int) * CONFIGURED_INPUT_TENSOR_DIMS));
+    if (info->dims == nullptr) {
+        ErrPrintCode(errno, "malloc");
+        // Unwind the partially built info before bailing out.
+        free(info->name);
+        info->name = nullptr;
+        free(info);
+        return nullptr;
+    }
+
+    // TODO:
+    // This array information should be updated for each image format specification
+    // In case of the YUV, it could be manipulatd as a 3 dimensional array buffer
+    // At least now, image format related buffer layout will be treated as a 2-dimensional
+    // array (width and height) buffer
+    info->dims->size = CONFIGURED_INPUT_TENSOR_DIMS;
+    info->dims->data[0] = imageConfig->width;
+    info->dims->data[1] = imageConfig->height;
+    return info;
+}
+
int Peer::ConfigureInput(const beyond_config *options)
{
if (serverCtx != nullptr) {
return ret;
}
+ beyond_tensor_info *newInfo = AllocInputTensorInfo(config);
+ if (newInfo == nullptr) {
+ int ret = -errno;
+ ErrPrintCode(errno, "strdup");
+ free(client_desc);
+ client_desc = nullptr;
+ free(server_desc);
+ server_desc = nullptr;
+ return ret;
+ }
+
+ FreeInputTensorInfo(configuredInputTensorInfo);
+ configuredInputTensorInfo = newInfo;
_options->client.preprocessing = client_desc;
_options->server.preprocessing = server_desc;
} else {
return -EILSEQ;
}
+ // NOTE:
+ // If there is a configured input information,
+ // GetInputTensorInfo() should return the configured input tensor information
+ if (configuredInputTensorInfo) {
+ size = 1;
+ info = configuredInputTensorInfo;
+ return 0;
+ }
+
const beyond_tensor_info *_info = nullptr;
int _size = 0;
return 0;
}
+// Release a tensor info produced by AllocInputTensorInfo() and reset the
+// caller's pointer to nullptr. Calling it with an already-null pointer is
+// a harmless no-op.
+void Peer::FreeInputTensorInfo(beyond_tensor_info *&info)
+{
+    if (info != nullptr) {
+        // Drop the owned name string and dimension array first,
+        // then the structure itself.
+        free(info->name);
+        info->name = nullptr;
+        free(info->dims);
+        info->dims = nullptr;
+        free(info);
+        info = nullptr;
+    }
+}
+
void Peer::FreeTensor(beyond_tensor *&tensor, int size)
{
if (clientCtx == nullptr) {
, reservedConfiguration(nullptr)
, authenticator(nullptr)
, caAuthenticator(nullptr)
+ , configuredInputTensorInfo(nullptr)
{
}