constexpr char DeviceUseConfigOpt[] = "use_config";
constexpr char DeviceInputConfigTypeOpt[] = "conf_type";
constexpr char DeviceInputConfigFormatOpt[] = "conf_format";
+constexpr char DeviceInputConfigSizeOpt[] = "conf_size";
constexpr char DeviceStatisticsOpt[] = "stat";
constexpr char DeviceAccelOpt[] = "accel";
constexpr char DeviceDummyInputOpt[] = "input_kb"; /* bypass(no inference) experiment */
const int UINT8_BYTES = 1;
const int FLOAT32_BYTES = 4;
-const int IMG_HEIGHT = 100;
-const int IMG_WIDTH = 100;
+const int IMG_SIZE = 100;
const double MICRO_SEC_TO_SEC = 1e-6;
const int KB_TO_BYTE = 1024;
, rep_port_(3001)
, service_port_(3000)
, use_config_(0)
+ , config_size_(IMG_SIZE)
, input_fps_(10)
, repeat_(1)
, num_invoked_(0)
, discovered_(0)
, num_bytes_per_channel_(UINT8_BYTES)
- , dump_stat_(1)
+ , dump_stat_(0)
, input_in_bytes_(0)
, output_in_bytes_(0)
, dummy_input_kb_(0) /* bypass(no inference) experiment */
, bypass_inference_(false)
+ , use_image_raw_(false)
, main_loop_(g_main_loop_new(nullptr, FALSE))
, target_(TargetMode::UNKNOWN)
, invoke_(InvokeMode::SYNC)
~ImageClassification() override
{
+ printf("\n");
if (dump_stat_ == 1) {
PrintStatistics();
}
bool CheckDevice_InferencePeer();
bool CheckDevice_Discovery();
bool LoadLabels();
+ bool LoadImage();
bool CheckEdge_Peer();
bool CheckEdge_Discovery();
bool Run();
bool RunDevice();
bool RunMobilenet();
- bool LoadImage();
bool PrintResult();
void PrintStatistics();
std::string config_format_;
std::string config_type_;
std::string accel_;
+ std::string image_raw_;
unsigned short req_port_;
unsigned short rep_port_;
unsigned short service_port_;
int use_config_;
+ int config_size_;
int input_fps_;
int repeat_;
int num_invoked_;
int discovered_;
int num_bytes_per_channel_;
int dump_stat_;
- int input_in_bytes_;
- int output_in_bytes_;
+ int input_in_bytes_; /* expected input size */
+ int output_in_bytes_; /* expected output size */
int dummy_input_kb_; /* bypass(no inference) experiment */
bool bypass_inference_;
+ bool use_image_raw_;
GMainLoop *main_loop_;
TargetMode target_;
"Peer configuration input type (`image` or `video` | default: image)"),
Option::CreateOption(DeviceInputConfigFormatOpt, &config_format_,
"Peer configuration input format(BGR, RGB, I420, YV12 and so on | default: BGR)"),
+ Option::CreateOption(DeviceInputConfigSizeOpt, &config_size_,
+ "Peer configuration input size (the same width & height | default: 100)"),
Option::CreateOption(DeviceAccelOpt, &accel_,
"Peer inference accel (`cpu` or `gpu` | default: cpu)"),
Option::CreateOption(DeviceInvocationOpt, &invocation_,
{
bool result = true;
- // Note : dummy_input_kb_ is for bypass(no inference) experiment
if (dummy_input_kb_ != 0) {
+ // Note : dummy_input_kb_ is for bypass(no inference) experiment
// skip inference related precondition(input image conversion, model/label loading)
bypass_inference_ = true;
result = CheckCmdline_Device_Bypass();
} else {
result = CheckCmdline_Device_Inference();
}
+ // the expected input and output data sizes must be calculated
+ // before the tensor info is queried from the model at runtime
+ if ((input_in_bytes_ <= 0) ||
+ (output_in_bytes_ <= 0)) {
+ printf("Invalid input and output data (input:%d, output:%d)\n", input_in_bytes_, output_in_bytes_);
+ result = false;
+ }
+
if (invocation_.compare("async") == 0) {
invoke_ = InvokeMode::ASYNC;
} else {
// Note : load the image after use_config_ is decided
// convert the image according to the config type and format
+ // and calculate the input size depending on the options
if (LoadImage() == false) {
result = false;
}
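+ // one score per output label; the element width follows the model data type (uint8 or float32)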
+ output_in_bytes_ = OUTPUT_LABELS_NUM * num_bytes_per_channel_;
+
return result;
}
}
model_check.close();
+ if (dummy_input_kb_ <= 0){
+ printf("Invalid input size %d KB\n", dummy_input_kb_);
+ result = false;
+ }
+
+ // the bypass experiment sends an n KB input and expects a fixed OUTPUT_LABELS_NUM (1001) byte output from the edge
+ output_in_bytes_ = OUTPUT_LABELS_NUM;
+ input_in_bytes_ = dummy_input_kb_ * KB_TO_BYTE;
+
// need to check mixed use of beyond_inference_set_input_tensor_info and beyond_peer_configure
// make sure the config is turned off, because the user could turn it on unintentionally
use_config_= 0;
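+ // allocate the dummy payload once; it is copied into the input tensor on every invocation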
+ dummy_data_ = static_cast<void *>(malloc(input_in_bytes_));
+ if (dummy_data_ == nullptr) {
+ printf("Failed allocate %d bytes\n", input_in_bytes_);
+ result = false;
+ }
+
// Note : dummy_input_kb_ is for bypass(no inference) experiment
// need an n KB transaction with a dummy msg inside
// TODO: a sequence number will be added to the msg
- int size_in_byte = dummy_input_kb_*KB_TO_BYTE;
- dummy_data_ = static_cast<void *>(malloc(size_in_byte));
-
- beyond_tensor_info::dimensions *in_dims = nullptr;
- in_dims = static_cast<beyond_tensor_info::dimensions *>(malloc(sizeof(beyond_tensor_info::dimensions) + sizeof(int) * 4));
- in_dims->size = 4;
- in_dims->data[0] = 1;
- in_dims->data[1] = 1;
- in_dims->data[2] = 1;
- in_dims->data[3] = size_in_byte;
-
- dummy_tensor_info_.type = BEYOND_TENSOR_TYPE_UINT8;
- dummy_tensor_info_.size = size_in_byte;
- dummy_tensor_info_.name = nullptr;
- dummy_tensor_info_.dims = in_dims;
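+ // describe the dummy input as a flat 1 x 1 x 1 x N uint8 tensor so its byte size matches input_in_bytes_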
+ if (result == true) {
+ beyond_tensor_info::dimensions *in_dims = nullptr;
+ in_dims = static_cast<beyond_tensor_info::dimensions *>(malloc(sizeof(beyond_tensor_info::dimensions) + sizeof(int) * 4));
+ if (in_dims == nullptr) {
+ printf("Failed to allocate tensor dimensions\n");
+ result = false;
+ } else {
+ in_dims->size = 4;
+ in_dims->data[0] = 1;
+ in_dims->data[1] = 1;
+ in_dims->data[2] = 1;
+ in_dims->data[3] = input_in_bytes_;
+ dummy_tensor_info_.type = BEYOND_TENSOR_TYPE_UINT8;
+ dummy_tensor_info_.size = input_in_bytes_;
+ dummy_tensor_info_.name = nullptr;
+ dummy_tensor_info_.dims = in_dims;
+ }
+ }
return result;
}
}
struct beyond_input_image_config image_config = {
.format = format,
- .width = IMG_WIDTH,
- .height = IMG_HEIGHT,
+ .width = config_size_,
+ .height = config_size_,
.convert_format = "RGB",
.convert_width = MOBILENETV1_WIDTH,
.convert_height = MOBILENETV1_HEIGHT,
return true;
}
+bool ImageClassification::LoadImage()
+{
+ cv::Mat inputImage = cv::imread(input_image_path_.c_str(), cv::IMREAD_COLOR);
+
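+ // use_config_ == 1 : keep the input in the configured format/size and rely on the peer configuration to convert it;
+ // otherwise convert it to the MobileNet v1 input tensor locally with OpenCV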
+ if (use_config_ == true) {
+ if (inputImage.empty()) {
+ if (DEBUG == true) {
+ printf("%s is not image file format, but it could be a raw image.\n", input_image_path_.c_str());
+ }
+ // TODO : convert raw file to image_data_
+ std::ifstream dataStream(input_image_path_, std::ios::binary);
+ if (dataStream.is_open()) {
+ std::string content((std::istreambuf_iterator<char>(dataStream)), std::istreambuf_iterator<char>());
+ image_raw_ = content;
+ if (image_raw_.empty() == false) {
+ use_image_raw_ = true;
+ input_in_bytes_ = image_raw_.size();
+ return true;
+ }
+ }
+ printf("Invalid raw file %s \n", input_image_path_.c_str());
+ return false;
+ } else if (config_format_.compare("BGR") == 0) {
+ // Note : read image file using opencv, and resize it width * height
+ if (config_size_ <= 0) {
+ printf("Invalid config size %d \n", config_size_);
+ return false;
+ }
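+ // BGR keeps 3 bytes per pixel, so the payload is width * height * 3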
+ cv::resize(inputImage, image_data_, cv::Size(config_size_, config_size_));
+ input_in_bytes_ = config_size_ * config_size_ * 3;
+ } else {
+ printf("Unspported format %s for %s \n", config_format_.c_str(), input_image_path_.c_str());
+ return false;
+ }
+ } else {
+ if (inputImage.empty()) {
+ printf("Cannot load %s. If it is raw media file, than plz --use_config=1 --conf_format={$FORMAT}\n", input_image_path_.c_str());
+ return false;
+ } else {
+ if (DEBUG == true) {
+ printf("Convert %s to tensors (dimenssion 1:%d:%d:3,type=%s) \n", input_image_path_.c_str(), MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT, model_dtype_.c_str());
+ }
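+ // float32 models take RGB values normalized to [0, 1]; uint8 models take raw RGB bytes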
+ if (num_bytes_per_channel_ == FLOAT32_BYTES) {
+ cv::Mat temp;
+ cv::resize(inputImage, temp,
+ cv::Size(MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT));
+ cv::cvtColor(temp, temp, cv::COLOR_BGR2RGB);
+ temp.convertTo(image_data_, CV_32FC3, 1/255.0);
+ } else {
+ cv::resize(inputImage, image_data_,
+ cv::Size(MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT));
+ cv::cvtColor(image_data_, image_data_, cv::COLOR_BGR2RGB);
+ }
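+ // RGB tensor: width * height * 3 channels * bytes per channel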
+ input_in_bytes_ = MOBILENETV1_WIDTH * MOBILENETV1_HEIGHT * 3 * num_bytes_per_channel_;
+ }
+ }
+ return true;
+}
+
bool ImageClassification::CheckEdge_Peer()
{
int ret;
beyond_tensor *beyond_tensors = BEYOND_TENSOR(in_tensor_h);
auto &in = beyond_tensors[0];
- if (num_invoked_ == 0) {
- printf("input (type : 0x%.2X, size : %d bytes)\n", in.type, in.size);
- }
- if (bypass_inference_ == true) {
- memcpy(in.data, dummy_data_, in.size);
- } else {
- memcpy(in.data, image_data_.data, in.size);
+
+ if (DEBUG && (num_invoked_ == 0)) {
+ // the tensor size and the number of bytes copied into it can differ
+ printf("input (tensor type : 0x%.2X, tensor size : %d bytes, copy size : %d bytes)\n", in.type, in.size, input_in_bytes_);
}
- input_in_bytes_ = in.size;
- // invoke
- num_invoked_++;
- if (beyond_inference_do(inference_h_, in_tensor_h, nullptr) < 0) {
- printf("Failed beyond_inference_do\n");
+ if (in.size < input_in_bytes_) {
+ printf("Cannot copy tensor (dst %d bytes, src %d bytes)\n", in.size, input_in_bytes_);
result = false;
}
+
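+ // copy the source payload (dummy data, raw image, or converted image) into the tensor, then invoke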
+ if (result == true) {
+ if (bypass_inference_ == true) {
+ memcpy(in.data, dummy_data_, input_in_bytes_);
+ } else if (use_image_raw_ == true) {
+ memcpy(in.data, image_raw_.c_str(), input_in_bytes_);
+ } else {
+ memcpy(in.data, image_data_.data, input_in_bytes_);
+ }
+
+ // invoke
+ num_invoked_++;
+ if (beyond_inference_do(inference_h_, in_tensor_h, nullptr) < 0) {
+ printf("Failed beyond_inference_do\n");
+ result = false;
+ }
+ }
+
beyond_inference_unref_tensor(in_tensor_h);
if ((result == true) &&
return result;
}
-bool ImageClassification::LoadImage()
-{
- cv::Mat inputImage = cv::imread(input_image_path_.c_str(), cv::IMREAD_COLOR);
-
- if (use_config_ == true) {
- if (inputImage.empty()) {
- if (DEBUG == true) {
- printf("Failed to load an image from %s, but it could be a raw image format.\n", input_image_path_.c_str());
- }
- // TODO : convert raw file to image_data_
- printf("Invalid file %s \n", input_image_path_.c_str());
- return false;
- } else if (config_format_.compare("BGR") == 0){
- // Note : read image file using opencv, and resize it 100x100
- cv::resize(inputImage, image_data_, cv::Size(IMG_WIDTH, IMG_HEIGHT));
- } else {
- printf("Unspported format %s for %s \n", config_format_.c_str(), input_image_path_.c_str());
- return false;
- }
- } else {
- if (inputImage.empty()) {
- printf("Invalid file %s \n", input_image_path_.c_str());
- return false;
- } else {
- if (DEBUG == true) {
- printf("Convert %s to tensors (dimenssion 1:%d:%d:3,type=%s) \n", input_image_path_.c_str(), MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT, model_dtype_.c_str());
- }
- if (num_bytes_per_channel_ == FLOAT32_BYTES) {
- cv::Mat temp;
- cv::resize(inputImage, temp,
- cv::Size(MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT));
- cv::cvtColor(temp, temp, cv::COLOR_BGR2RGB);
- temp.convertTo(image_data_, CV_32FC3, 1/255.0);
- } else {
- cv::resize(inputImage, image_data_,
- cv::Size(MOBILENETV1_WIDTH, MOBILENETV1_HEIGHT));
- cv::cvtColor(image_data_, image_data_, cv::COLOR_BGR2RGB);
- }
- }
- }
- return true;
-}
-
bool ImageClassification::PrintResult()
{
beyond_tensor_h out_tensor_h;
int count;
int index = -1;
- int copy_bytes = OUTPUT_LABELS_NUM * num_bytes_per_channel_;
if (beyond_inference_get_output(inference_h_, &out_tensor_h, &count) < 0) {
printf("Failed beyond_inference_get_output\n");
beyond_tensor *tensors = BEYOND_TENSOR(out_tensor_h);
if (num_invoked_ == 1) {
- printf("output (type : 0x%.2X, size : %d bytes)\n", tensors[0].type, tensors[0].size);
+ printf("output (tensor type : 0x%.2X, tensor size : %d bytes, copy size: %d bytes)\n", tensors[0].type, tensors[0].size, output_in_bytes_);
}
- if (tensors[0].size != copy_bytes) {
- printf("incorrect size( %d, but expected : %d)\n", tensors[0].size, copy_bytes);
+ if (tensors[0].size != output_in_bytes_) {
+ printf("incorrect size( %d, but expected : %d)\n", tensors[0].size, output_in_bytes_);
return false;
}
- output_in_bytes_ = copy_bytes;
-
if (num_bytes_per_channel_ == UINT8_BYTES) {
if (tensors[0].type != BEYOND_TENSOR_TYPE_UINT8) {
printf("incorrect type( 0x%.2X, but expected : %d )\n", tensors[0].type, BEYOND_TENSOR_TYPE_UINT8);
uint8_t max_score = 0;
uint8_t scores[OUTPUT_LABELS_NUM];
- memcpy(&scores, tensors[0].data, copy_bytes);
+ memcpy(&scores, tensors[0].data, output_in_bytes_);
for (int i = 0; i < OUTPUT_LABELS_NUM; i++) {
if (scores[i] > 0 && scores[i] > max_score) {
float max_score = 0;
float scores[OUTPUT_LABELS_NUM];
- memcpy(&scores, tensors[0].data, tensors[0].size);
+ memcpy(&scores, tensors[0].data, output_in_bytes_);
for (int i = 0; i < OUTPUT_LABELS_NUM; i++) {
if (scores[i] > 0 && scores[i] > max_score) {
std::string start_string(std::to_string(start));
std::string end_string(std::to_string(end));
- printf("\n\n------------------------------\n");
+ printf("\n------------------------------\n");
printf("Input fps %d, elapsed %s sec\n", input_fps_, elapse.c_str());
printf("Inference %u times request, %u times reply (%d missed)\n", req_times_.size(), rep_times_.size(), drop_inference);
printf("Average Latency %s sec\n", average.c_str());