2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include "QuantizeHelper.hpp"
9 #include <armnn/ArmNN.hpp>
10 #include <armnn/Tensor.hpp>
11 #include <armnn/TypesUtils.hpp>
13 #include <backendsCommon/CpuTensorHandle.hpp>
14 #include <backendsCommon/WorkloadFactory.hpp>
16 #include <test/TensorHelpers.hpp>
// Builds and runs a single Softmax workload on a fixed 2x4 input tensor and
// fills in both the actual output (ret.output) and a locally computed
// expected output (ret.outputExpected) for the caller to compare.
//   workloadFactory - backend factory used to create tensor handles and the workload
//   beta            - softmax exponent scale, forwarded to SoftmaxDescriptor::m_Beta
// NOTE(review): this chunk is incomplete — the `template<typename T>` header,
// the function's opening/closing braces, the literal input values, the
// `qOffset` declaration, `workload->Execute()` and the trailing `return ret;`
// are not visible here (the embedded original line numbers jump over them).
21 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
25 armnn::TensorInfo inputTensorInfo;
26 armnn::TensorInfo outputTensorInfo;
// 2 rows x 4 elements; each row is softmax'd independently (see comment below).
28 unsigned int inputShape[] = { 2, 4 };
30 inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
// Quantization scale chosen so the [0,1] softmax output range maps onto a
// full 8-bit quantized range (1/256 per step). Only meaningful for quantized T.
31 float qScale = 1.f / 256.f;
// NOTE(review): qOffset is used here but its declaration is not visible in
// this chunk — presumably `int32_t qOffset = 0;` on a missing line; confirm.
33 inputTensorInfo.SetQuantizationScale(qScale);
34 inputTensorInfo.SetQuantizationOffset(qOffset);
// Output uses the same shape and quantization parameters as the input.
36 outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
37 outputTensorInfo.SetQuantizationScale(qScale);
38 outputTensorInfo.SetQuantizationOffset(qOffset);
40 LayerTestResult<T, 2> ret(outputTensorInfo);
42 // Each row is independently softmax'd.
// NOTE(review): the literal input values and the closing `})));` of this
// MakeTensor call are on lines missing from this chunk. From the expected
// values computed below they are presumably {0,1,0,0} and {0.5,0,0,0} — confirm.
43 auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
44 QuantizedVector<T>(qScale, 0, {
49 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
50 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// Describe the softmax workload: only parameter is beta.
52 armnn::SoftmaxQueueDescriptor data;
53 data.m_Parameters.m_Beta = beta;
55 armnn::WorkloadInfo info;
56 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
57 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
59 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
61 inputHandle->Allocate();
62 outputHandle->Allocate();
63 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
// NOTE(review): `workload->Execute();` is expected between Finalize() and the
// result copy below but is on a line missing from this chunk — confirm.
65 workloadFactory.Finalize();
68 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
// Reference softmax for row 0, computed with the row maximum (1.0) subtracted
// before exponentiation (softmax is shift-invariant, this is the numerically
// stable form): exp(beta * (x_i - max)) / sum.
70 float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
71 exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
72 float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
// Reference softmax for row 1, row maximum 0.5 subtracted.
73 float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
74 exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
75 float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
// Quantize the expected values with the same scale/offset as the output tensor.
// NOTE(review): the closing `})));` and `return ret;` fall on missing lines.
77 ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
78 QuantizedVector<T>(qScale, qOffset, {
79 x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
80 x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
// Runs the same Softmax workload through two factories — the backend under
// test (workloadFactory) and a reference backend (refWorkloadFactory) — on
// identical random 20x30 input, storing the first backend's result in
// ret.output and the reference backend's result in ret.outputExpected so the
// caller can compare them element-wise.
// NOTE(review): this chunk is incomplete — the `template<typename T>` header,
// the trailing parameter line (presumably `float beta`, since beta is used
// below), the `qOffset` declaration, the opening/closing braces,
// `workload->Execute();` and `return ret;` are on lines missing here.
87 LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
88 armnn::IWorkloadFactory& refWorkloadFactory,
// A deliberately larger tensor than the simple test: 20 independent rows of 30.
92 const int batchSize = 20;
93 const int channels = 30;
95 armnn::TensorInfo inputTensorInfo;
96 armnn::TensorInfo outputTensorInfo;
98 unsigned int inputShape[] = { batchSize, channels };
100 inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
101 outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
// Same quantization parameters on both tensors so the two backends' outputs
// are directly comparable. (qOffset declared on a line not visible here.)
102 float qScale = 1.f / 256.f;
104 inputTensorInfo.SetQuantizationScale(qScale);
105 inputTensorInfo.SetQuantizationOffset(qOffset);
106 outputTensorInfo.SetQuantizationScale(qScale);
107 outputTensorInfo.SetQuantizationOffset(qOffset);
110 LayerTestResult<T, 2> ret(outputTensorInfo);
// Fixed seed (0xF00D) keeps the random input deterministic across runs.
111 auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
// Handles for the backend under test.
113 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
114 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
116 armnn::SoftmaxQueueDescriptor data;
117 data.m_Parameters.m_Beta = beta;
119 armnn::WorkloadInfo info;
120 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
121 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Handles for the reference backend.
123 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
124 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
// Clone the descriptor/info, then rebind slot 0 to the reference handles so
// both workloads share identical parameters but separate tensors.
127 armnn::SoftmaxQueueDescriptor refData = data;
128 armnn::WorkloadInfo refInfo = info;
129 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
130 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
132 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
133 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
135 outputHandleRef->Allocate();
136 inputHandleRef->Allocate();
138 inputHandle->Allocate();
139 outputHandle->Allocate();
// Both backends receive byte-identical input data.
141 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
142 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
// NOTE(review): `workload->Execute();` for the backend under test is expected
// near here but is on a line missing from this chunk — confirm.
144 workloadFactory.Finalize();
146 refWorkloadFactory.Finalize();
147 workloadRef->Execute();
// Actual result from the backend under test; reference result becomes the
// expected tensor. (`return ret;` follows on a line not visible here.)
149 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
150 CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());