tensorConfig.mTensorInfo.height = layerInfo.dims[1];
} else {
LOGE("Invalid shape type[%d]", layerInfo.shapeType);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
if (!inputMeta.option.empty()) {
return INFERENCE_ENGINE_ERROR_NONE;
}
// NOTE(review): this span is a diff fragment ('-' = removed line, '+' = added
// line, unprefixed = context), not plain C++. Several interior lines appear to
// have been elided by the diff tool — there is no visible declaration of
// width/height/ch and no outer `for (h_offset ...)` loop, yet all three are
// used below. TODO: confirm against the full file before relying on this view.
//
// Post-patch intent: fill `buffer` with random values in [1.0, 255.0] and
// report success/failure, instead of the old void signature.
-void CopyRandomMatrixToMemory(inference_engine_tensor_buffer& buffer, InferenceConfig tensorConfig)
+int CopyRandomMatrixToMemory(inference_engine_tensor_buffer& buffer, InferenceConfig tensorConfig)
{
// Added guard: reject any data type outside the open range
// (INFERENCE_TENSOR_DATA_TYPE_NONE, INFERENCE_TENSOR_DATA_TYPE_MAX).
+ if (tensorConfig.mDataType <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+ tensorConfig.mDataType >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+ LOGE("tensorConfig.mDataType [%d] is not supported", tensorConfig.mDataType);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
// Non-deterministic seed; values drawn uniformly from [1.0, 255.0).
std::random_device rd;
std::mt19937 generator(rd());
std::uniform_real_distribution<> distribution(1.0, 255.0);
// NOTE(review): `h_offset`, `width`, and `ch` are not declared in this view —
// presumably an enclosing `for (int h_offset = 0; h_offset < height; ...)`
// loop and the dimension locals were dropped as unchanged diff context.
for (int w_offset = 0; w_offset < width; w_offset++)
for (int ch_offset = 0; ch_offset < ch; ch_offset++) {
// HWC-style linear offset into the buffer.
int offset = h_offset * width * ch + w_offset * ch + ch_offset;
- static_cast<float*>(buffer.buffer)[offset] = distribution(generator);
// NOTE(review): the added `else` branch implicitly narrows double -> char
// (lossy), and writes one byte per element — that indexing is wrong for
// 16/32-bit non-float data types (e.g. uint16/int32), where `offset` would
// need to scale by the element size. Also consider passing `tensorConfig`
// by const reference to avoid the by-value copy. TODO: confirm intended
// dtype coverage with the author.
+ if (tensorConfig.mDataType == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ static_cast<float*>(buffer.buffer)[offset] = distribution(generator);
+ else
+ static_cast<char*>(buffer.buffer)[offset] = distribution(generator);
}
// Success path of the new int-returning signature.
+ return INFERENCE_ENGINE_ERROR_NONE;
}
static gboolean process(std::vector<std::string>& model_paths,
for (auto& input : inputs) {
LOGI("input.second.size :[%zu]", input.second.size);
- CopyRandomMatrixToMemory(input.second, tensorConfig);
+ ret = CopyRandomMatrixToMemory(input.second, tensorConfig);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("CopyRandomMatrixToMemory failed");
+ return FALSE;
+ }
}
std::chrono::system_clock::time_point StartTime =