// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
7 #include "QuantizeHelper.hpp"
8 #include "WorkloadTestUtils.hpp"
10 #include <armnn/ArmNN.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/TypesUtils.hpp>
14 #include <backendsCommon/CpuTensorHandle.hpp>
15 #include <backendsCommon/IBackendInternal.hpp>
16 #include <backendsCommon/WorkloadFactory.hpp>
18 #include <test/TensorHelpers.hpp>
22 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
23 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
24 armnn::IWorkloadFactory& workloadFactory,
25 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
30 armnn::TensorInfo inputTensorInfo;
31 armnn::TensorInfo outputTensorInfo;
33 unsigned int inputShape[] = { 2, 4 };
35 inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
36 float qScale = 1.f / 256.f;
38 inputTensorInfo.SetQuantizationScale(qScale);
39 inputTensorInfo.SetQuantizationOffset(qOffset);
41 outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
42 outputTensorInfo.SetQuantizationScale(qScale);
43 outputTensorInfo.SetQuantizationOffset(qOffset);
45 LayerTestResult<T, 2> ret(outputTensorInfo);
47 // Each row is independently softmax'd.
48 auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
49 QuantizedVector<T>(qScale, 0, {
54 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
55 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
57 armnn::SoftmaxQueueDescriptor data;
58 data.m_Parameters.m_Beta = beta;
60 armnn::WorkloadInfo info;
61 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
62 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
64 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
66 inputHandle->Allocate();
67 outputHandle->Allocate();
68 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
70 ExecuteWorkload(*workload, memoryManager);
72 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
74 float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
75 exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
76 float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
77 float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
78 exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
79 float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
81 ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
82 QuantizedVector<T>(qScale, qOffset, {
83 x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
84 x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
// Cross-backend consistency test: executes the same Softmax workload on the
// backend under test and on a reference backend with identical random input,
// then copies both outputs into `ret` (output vs outputExpected) so the
// caller can compare them.
// NOTE(review): this extract is garbled — every line carries a stale
// line-number prefix and several original lines are missing from view (the
// trailing `float beta` parameter, the opening brace, the `qOffset`
// declaration, and the final `return ret;` / closing brace). Code is left
// byte-identical here; confirm against the upstream file before relying on it.
90 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
91 LayerTestResult<T, 2> CompareSoftmaxTestImpl(
92 armnn::IWorkloadFactory& workloadFactory,
93 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
94 armnn::IWorkloadFactory& refWorkloadFactory,
// Fixed (batch, channels) = (20, 30) test shape.
98 const int batchSize = 20;
99 const int channels = 30;
101 armnn::TensorInfo inputTensorInfo;
102 armnn::TensorInfo outputTensorInfo;
104 unsigned int inputShape[] = { batchSize, channels };
106 inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
107 outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
// Quantization parameters applied to both tensors; only meaningful for
// quantized ArmnnType instantiations. `qOffset` is declared in a line
// missing from this extract.
108 float qScale = 1.f / 256.f;
110 inputTensorInfo.SetQuantizationScale(qScale);
111 inputTensorInfo.SetQuantizationOffset(qOffset);
112 outputTensorInfo.SetQuantizationScale(qScale);
113 outputTensorInfo.SetQuantizationOffset(qOffset);
116 LayerTestResult<T, 2> ret(outputTensorInfo);
// Deterministic pseudo-random input (fixed seed 0xF00D) in [0, 1].
117 auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
119 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
120 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
122 armnn::SoftmaxQueueDescriptor data;
123 data.m_Parameters.m_Beta = beta;
125 armnn::WorkloadInfo info;
126 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
127 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Mirror tensor handles and descriptor for the reference backend; the
// descriptor/info are copied, then rebound to the reference handles.
129 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
130 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
133 armnn::SoftmaxQueueDescriptor refData = data;
134 armnn::WorkloadInfo refInfo = info;
135 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
136 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
138 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
139 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
141 outputHandleRef->Allocate();
142 inputHandleRef->Allocate();
144 inputHandle->Allocate();
145 outputHandle->Allocate();
// The same input buffer is fed to both backends.
147 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
148 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
150 ExecuteWorkload(*workload, memoryManager);
152 workloadRef->Execute();
// Read back actual (backend under test) and expected (reference) outputs.
154 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
155 CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());