armnn::ActivationFunction activationFunction,
float activationParameterA,
float activationParameterB,
- float qScale,
- int32_t qOffset,
+ float scale,
+ int32_t offset,
const std::vector<float>& inputData,
+ float outScale,
+ int32_t outOffset,
const std::vector<float>& outputExpectedData)
{
constexpr static unsigned int inputWidth = 16u;
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
{
- inputTensorInfo.SetQuantizationScale(qScale);
- inputTensorInfo.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
+ inputTensorInfo.SetQuantizationScale(scale);
+ inputTensorInfo.SetQuantizationOffset(offset);
+ outputTensorInfo.SetQuantizationScale(outScale);
+ outputTensorInfo.SetQuantizationOffset(outOffset);
}
LayerTestResult<T, 4> result(inputTensorInfo);
- auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+ auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
// Calculated manually.
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
+ outputExpectedData));
return result;
}
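// For illustration only, a minimal sketch of a call using the new split input/output
// quantization parameters. The helper name, template argument, and input scale/offset
// below are assumptions (this hunk does not show them); the output values mirror the
// first updated call site that follows.
//
//     std::vector<float> inputData      = { -5.0f, 0.0f, 5.0f };
//     std::vector<float> expectedOutput = { 0.0067f, 0.5f, 0.9933f }; // sigmoid(x), by hand
//
//     SimpleActivationTest<armnn::DataType::QuantisedAsymm8>( // hypothetical name/template
//         workloadFactory,                    // preceding parameters elided in this hunk
//         armnn::ActivationFunction::Sigmoid,
//         0.f, 0.f,                           // activation parameters A/B (unused by Sigmoid)
//         0.1f, 50,                           // input scale / offset (arbitrary example values)
//         inputData,
//         1.f / 256.f, 0,                     // output scale / offset the backends require
//         expectedOutput);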
qScale,
qOffset,
inputData,
+ 1.f / 256.f,
+ 0,
outputExpectedData);
}
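// Pinning the output to scale = 1.f / 256.f, offset = 0 here is deliberate: an 8-bit
// activation whose output lies in [0, 1) uses exactly this mapping, and the backend
// checks further down reject any other output quantization.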
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
qScale,
qOffset,
inputData,
+ qScale,
+ qOffset,
outputExpectedData);
}
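// The reworked checks below track Compute Library's QuantizationInfo, whose scale and
// offset are now vectors (introduced for per-channel quantization) rather than scalars;
// hence the .empty() guards before indexing element [0].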
const auto outputQuantization = output.info()->quantization_info();
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+ if (outputQuantization.scale.empty() || outputQuantization.offset.empty() ||
+     (outputQuantization.scale[0] != (1.0f / 256.0f)) || (outputQuantization.offset[0] != 0))
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
const auto outputQuantization = output.info()->quantization_info();
- if ((outputQuantization.scale != (1.0f / 256.0f)) || (outputQuantization.offset != 0))
+ if (outputQuantization.scale.empty() || outputQuantization.offset.empty() ||
+     (outputQuantization.scale[0] != (1.0f / 256.0f)) || (outputQuantization.offset[0] != 0))
{
throw InvalidArgumentException(
"Invalid quantization for output. Only scale = 1.0f / 256.0f and offset = 0 supported");
}
+
unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
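// A hedged sketch of the configure step that typically follows (not part of this hunk;
// it assumes the NESoftmaxLayer::configure(input, output, beta, axis) overload and that
// `input`/`output` are this workload's ACL tensors):
//
//     layer->configure(&input, &output, descriptor.m_Parameters.m_Beta, aclAxis);
//     m_SoftmaxLayer.reset(layer.release());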