Update the CL pin to the latest master
author    Ferran Balaguer <ferran.balaguer@arm.com>
          Mon, 24 Jun 2019 11:43:38 +0000 (12:43 +0100)
committer Ferran Balaguer Arm <ferran.balaguer@arm.com>
          Wed, 26 Jun 2019 15:22:31 +0000 (15:22 +0000)
* Update SoftmaxUint8Workload for CL and NEON to deal with
  quantization parameters as vectors.
* Change Sigmoid Activation function QAsymm8 tests to use
  scale=1.f/256.f and offset=0 as the output quantization
  parameters (see the sketch below).
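
A minimal sketch of why these parameters suit the sigmoid output (the
helper below is illustrative, not code from this change): under QAsymm8,
real = scale * (quantized - offset), so scale = 1/256 with offset = 0
maps the function's (0, 1) output range onto the full [0, 255] code space.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical reference quantizer, for illustration only.
    uint8_t QuantizeQAsymm8(float value, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
    }

    // QuantizeQAsymm8(0.5f, 1.0f / 256.0f, 0) == 128, and a real value of
    // 1.0f saturates to 255 (representing 255/256).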

!android-nn-driver:1417

Signed-off-by: Ferran Balaguer <ferran.balaguer@arm.com>
Change-Id: Ief91f10193fbbbad0c0124ece41f0bf4e0dcd992

scripts/get_compute_library.sh
src/backends/backendsCommon/test/ActivationTestImpl.hpp
src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp

scripts/get_compute_library.sh
index 391168a..ec84209 100755 (executable)
@@ -10,7 +10,7 @@ CMD=$( basename $0 )
 #DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_05" # Release 19.05
 #
 # For pinning to a revision use this:
-DEFAULT_CLFRAMEWORKREVISION="d7dd15c445397ab879439de6659859db09f4b752"
+DEFAULT_CLFRAMEWORKREVISION="3689fcd5915cd902cb4ea5f618f2a6e42f6dc4a1"
 
 usage() {
     echo "Usage: $CMD (Use the default clframework SHA)"
src/backends/backendsCommon/test/ActivationTestImpl.hpp
index 9088d18..282e643 100644 (file)
@@ -392,9 +392,11 @@ LayerTestResult<T, 4> SimpleActivationTest(
     armnn::ActivationFunction activationFunction,
     float activationParameterA,
     float activationParameterB,
-    float qScale,
-    int32_t qOffset,
+    float scale,
+    int32_t offset,
     const std::vector<float>& inputData,
+    float outScale,
+    int32_t outOffset,
     const std::vector<float>& outputExpectedData)
 {
     constexpr static unsigned int inputWidth = 16u;
@@ -413,15 +415,15 @@ LayerTestResult<T, 4> SimpleActivationTest(
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
     {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
+        inputTensorInfo.SetQuantizationScale(scale);
+        inputTensorInfo.SetQuantizationOffset(offset);
+        outputTensorInfo.SetQuantizationScale(outScale);
+        outputTensorInfo.SetQuantizationOffset(outOffset);
     }
 
     LayerTestResult<T, 4> result(inputTensorInfo);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -448,7 +450,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
+                                                                                  outputExpectedData));
 
     return result;
 }
@@ -483,6 +486,8 @@ LayerTestResult<T, 4> SimpleSigmoidTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           1.f / 256.f,
+                                           0,
                                            outputExpectedData);
 }
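
For reference, with outScale = 1.f / 256.f and outOffset = 0 the expected
sigmoid outputs map cleanly onto uint8 codes (the sample values below are
assumptions for illustration; the test's actual data sits outside this hunk):

    // sigmoid(-1) ~ 0.26894 -> round(0.26894 * 256) =  69
    // sigmoid( 0) = 0.5     -> round(0.5     * 256) = 128
    // sigmoid( 1) ~ 0.73106 -> round(0.73106 * 256) = 187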
 
@@ -537,6 +542,8 @@ LayerTestResult<T, 4> ReLuTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -594,6 +601,8 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -636,6 +645,8 @@ LayerTestResult<T, 4> SoftReLuTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -691,6 +702,8 @@ LayerTestResult<T, 4> LeakyReLuTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -745,6 +758,8 @@ LayerTestResult<T, 4> AbsTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -799,6 +814,8 @@ LayerTestResult<T, 4> SqrtTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -853,6 +870,8 @@ LayerTestResult<T, 4> SquareTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
 
@@ -909,6 +928,8 @@ LayerTestResult<T, 4> TanhTestCommon(
                                            qScale,
                                            qOffset,
                                            inputData,
+                                           qScale,
+                                           qOffset,
                                            outputExpectedData);
 }
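
With the widened signature, callers now pass input and output quantization
separately. A hypothetical invocation (the leading workloadFactory and
memoryManager arguments, the template parameter convention, and all values
here are assumptions, shown only to make the new argument order concrete):

    auto result = SimpleActivationTest<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::ActivationFunction::Sigmoid,
        0.f,          // activationParameterA
        0.f,          // activationParameterB
        0.1f,         // scale  (input quantization)
        50,           // offset (input quantization)
        inputData,
        1.f / 256.f,  // outScale  (output quantization)
        0,            // outOffset (output quantization)
        outputExpectedData);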
 
src/backends/cl/workloads/ClSoftmaxUint8Workload.cpp
index 086f375..84d735c 100644 (file)
@@ -25,7 +25,10 @@ ClSoftmaxUint8Workload::ClSoftmaxUint8Workload(const SoftmaxQueueDescriptor& des
 
     const auto outputQuantization = output.info()->quantization_info();
 
-    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+        (outputQuantization.scale.empty()) ||
+        (outputQuantization.offset.empty()))
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7b2d290..d1e49d9 100644 (file)
@@ -25,11 +25,15 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
 
     const auto outputQuantization = output.info()->quantization_info();
 
-    if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+    if (((!outputQuantization.scale.empty()) && (outputQuantization.scale[0] != (1.0f / 256.0f))) ||
+        ((!outputQuantization.offset.empty()) && (outputQuantization.offset[0] != 0)) ||
+        (outputQuantization.scale.empty()) ||
+        (outputQuantization.offset.empty()))
     {
         throw InvalidArgumentException(
             "Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
     }
+
     unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
 
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
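
On the ArmNN side, an output tensor that satisfies this check in both the CL
and NEON workloads would be configured with the Set* calls already used in
the test hunks above (the shape and variable name are illustrative):

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 8 }, armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.0f / 256.0f); // surfaces as scale[0] in ACL
    outputTensorInfo.SetQuantizationOffset(0);            // surfaces as offset[0] in ACL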