Add Signed64 tensor data type support 52/242452/1 accepted/tizen_6.0_unified accepted/tizen_6.0_unified_hotfix tizen_6.0 tizen_6.0_hotfix accepted/tizen/6.0/unified/20201030.121051 accepted/tizen/6.0/unified/hotfix/20201103.051548 accepted/tizen/unified/20200911.043126 accepted/tizen/unified/20201112.124207 submit/tizen/20200828.025650 submit/tizen/20200910.024357 submit/tizen/20201104.021236 submit/tizen/20201109.032237 submit/tizen/20201109.053646 submit/tizen/20201110.032259 submit/tizen_6.0/20201029.205103 submit/tizen_6.0_hotfix/20201102.192503 submit/tizen_6.0_hotfix/20201103.114803 tizen_6.0.m2_release
author: Inki Dae <inki.dae@samsung.com>
Thu, 27 Aug 2020 05:38:42 +0000 (14:38 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Thu, 27 Aug 2020 05:38:42 +0000 (14:38 +0900)
Change-Id: Iae20f462062d155c3d7fc2891feb4fa989820a96
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp

index 69dcde1cecfb86d17a01382f0263b56062fcfc01..c4d5e50e53684a3f8e1f33bb868092bc2fdac8ea 100644 (file)
@@ -78,6 +78,9 @@ namespace ARMNNImpl
                case armnn::DataType::QuantisedAsymm8:
                        data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
                        break;
+               case armnn::DataType::Signed64:
+                       data_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+                       break;
                default:
                        LOGE("Invalid Input tensor type so it will use float32 in default.");
                        data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;