From: James Conroy
Date: Thu, 4 Jul 2019 15:56:44 +0000 (+0100)
Subject: IVGCVSW-3401 Update ACL pin to latest master
X-Git-Tag: submit/tizen/20200316.035456~472
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=33fa0a66a57ddfd1896478301de7ee047aae5e89;p=platform%2Fupstream%2Farmnn.git

IVGCVSW-3401 Update ACL pin to latest master

* Updated ACL pin to latest master.
* Minor changes to Softmax Neon/CL uint8 workloads to reflect
  refactoring in ACL.

!android-nn-driver:1476

Change-Id: I1c5005ddbcccdb41d8cb09d3fa61cf3ce0e9ffdb
Signed-off-by: James Conroy
---

diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh
index ec84209..316e300 100755
--- a/scripts/get_compute_library.sh
+++ b/scripts/get_compute_library.sh
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
 #DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05
 #
 # For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="3689fcd5915cd902cb4ea5f618f2a6e42f6dc4a1"
+DEFAULT_CLFRAMEWORKREVISION="7bb56c6337997281df10fa28ad7924c921b920eb"

 usage() {
   echo "Usage: $CMD (Use the default clframework SHA)"
diff --git a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 84d735c..ce2a9e6 100644
--- a/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
+++ b/src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
@@ -25,10 +25,9 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des

     const auto outputQuantization = output.info()->quantization_info();

-    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
-        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
-        (outputQuantization.scale.empty()) ||
-        (outputQuantization.offset.empty()))
+    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
+        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
+        outputQuantization.scale().empty() || outputQuantization.offset().empty())
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index d1e49d9..363c150 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -25,10 +25,9 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&

     const auto outputQuantization = output.info()->quantization_info();

-    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
-        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
-        (outputQuantization.scale.empty()) ||
-        (outputQuantization.offset.empty()))
+    if ((!outputQuantization.scale().empty() && outputQuantization.scale()[0] != (1.0f / 256.0f)) ||
+        (!outputQuantization.offset().empty() && outputQuantization.offset()[0] != 0) ||
+        outputQuantization.scale().empty() || outputQuantization.offset().empty())
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");