IVGCVSW-2467 Remove GetDataType<T> function
src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "QuantizeHelper.hpp"
#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
#include <cmath> // for std::exp, used in the reference computation below

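// Builds and runs a softmax workload on a fixed 2x4 input, then compares the
// backend's output against reference values computed inline below.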
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    using std::exp;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 2, 4 };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
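    // Softmax outputs lie in [0, 1), so a scale of 1/256 with zero offset spans
    // that range for the 8-bit quantized data types.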
    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);

    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Each row is independently softmax'd.
    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0.f, 1.f, 0.f, 0.f,
            .5f, 0.f, 0.f, 0.f,
        })));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

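    // Beta scales the logits before exponentiation:
    // softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j).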
    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

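    // Expected results, computed with the max-subtraction trick for numerical
    // stability: each element becomes exp((x_i - max(x)) * beta), normalised by
    // its row sum. Row 0 has max 1.0; row 1 has max 0.5.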
    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
        exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
        exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];

    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
        })));

    return ret;
}

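// Runs the same softmax workload on both the factory under test and a reference
// factory over random input, returning the two outputs so the caller can
// compare them within a tolerance.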
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    const int batchSize = 20;
    const int channels = 30;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channels };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);
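    // Random input in [0, 1]; the fixed seed keeps the comparison reproducible.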
    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

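    // Clone the descriptor and workload info, then rebind them to the reference
    // factory's tensor handles so both workloads run with identical settings.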
    armnn::SoftmaxQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);

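    // Run the workload under test through the helper, which drives the backend's
    // memory manager around execution, then run the reference workload directly.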
    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());

    return ret;
}