// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <test/TensorHelpers.hpp>
#include "QuantizeHelper.hpp"

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <cmath>
template<typename T>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 2, 4 };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    float qScale = 1.f / 256.f;
    int32_t qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Each row is independently softmax'd.
    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, 0, {
            // Values chosen so that the per-row maxima (1.0 and 0.5) match the
            // expected-output computation below.
            0.f, 1.f, 0.f, 0.f,
            0.5f, 0.f, 0.f, 0.f,
        })));
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
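
    // The expected output is computed by hand: for each row, softmax(x_i) =
    // exp((x_i - max) * beta) / sum_j exp((x_j - max) * beta), where max is the
    // row maximum (1.0 for the first row, 0.5 for the second).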
    float x0[4] = { std::exp((0.0f - 1.0f) * beta), std::exp((1.0f - 1.0f) * beta),
                    std::exp((0.0f - 1.0f) * beta), std::exp((0.0f - 1.0f) * beta) };
    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
    float x1[4] = { std::exp((0.5f - 0.5f) * beta), std::exp((0.0f - 0.5f) * beta),
                    std::exp((0.0f - 0.5f) * beta), std::exp((0.0f - 0.5f) * beta) };
    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
        })));

    return ret;
}
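
// A minimal usage sketch for the helper above (hypothetical wiring; the real
// unit tests drive it through their own factory fixtures and test macros):
//
//     armnn::RefWorkloadFactory factory;
//     LayerTestResult<float, 2> result = SimpleSoftmaxTestImpl<float>(factory, 1.0f);
//     // then compare result.output against result.outputExpected,
//     // e.g. with CompareTensors() from TensorHelpers.hpp.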

// Runs the same softmax workload on the factory under test and on a reference
// factory, returning both outputs so the caller can compare them.
template<typename T>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    const int batchSize = 20;
    const int channels = 30;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channels };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    float qScale = 1.f / 256.f;
    int32_t qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);
    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
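    // The same pseudo-random input (fixed seed, values in [0, 1]) is fed to both
    // factories, so any difference between the outputs is attributable to the
    // backends rather than the data.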
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::SoftmaxQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());

    return ret;
}
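
// Comparison sketch for the helper above (hypothetical wiring; assumes a second
// backend such as NEON is built in alongside the reference backend):
//
//     armnn::NeonWorkloadFactory neonFactory;
//     armnn::RefWorkloadFactory refFactory;
//     auto result = CompareSoftmaxTestImpl<float>(neonFactory, refFactory, 1.0f);
//     // result.output holds the NEON output and result.outputExpected the
//     // reference output; compare them, e.g. with CompareTensors().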