namespace InferenceEngineImpl {
namespace ARMNNImpl {
// Legacy numeric codes for ARMNN tensor data types.
// NOTE(review): the conversion code below now switches on armnn::DataType
// values directly, so these constants appear obsolete — confirm no other
// translation unit references them before removing this enum.
enum {
	ARMNN_DATA_TYPE_FLOAT32 = 1,
	ARMNN_DATA_TYPE_INT32 = 2,
	ARMNN_DATA_TYPE_UINT8 = 3,
	ARMNN_DATA_TYPE_INT64 = 4,
	ARMNN_DATA_TYPE_STRING = 5,
	ARMNN_DATA_TYPE_BOOL = 6,
	ARMNN_DATA_TYPE_MAX
};

InferenceARMNN::InferenceARMNN(void) :
mRuntime(nullptr, &armnn::IRuntime::Destroy),
mNetwork(armnn::INetworkPtr(nullptr, nullptr))
{
inference_tensor_data_type_e data_type;
- switch ((int)type) {
- case ARMNN_DATA_TYPE_FLOAT32:
+ LOGI("ENTER");
+
+ LOGI("data type = %d", (int)type);
+
+ switch (type) {
+ case armnn::DataType::Float32:
data_type = TENSOR_DATA_TYPE_FLOAT32;
break;
- case ARMNN_DATA_TYPE_UINT8:
+ case armnn::DataType::QuantisedAsymm8:
data_type = TENSOR_DATA_TYPE_UINT8;
break;
default:
break;
}
+ LOGI("LEAVE");
+
return data_type;
}