IVGCVSW-1946: Remove armnn/src from the include paths
platform/upstream/armnn.git: src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "QuantizeHelper.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>
#include <cmath> // for std::exp

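// Runs a Softmax workload over a quantized 2x4 tensor and checks the result
// against expected values computed inline. Softmax is applied independently
// to each row; the expected values mirror the numerically stable formulation
// (the row maximum is subtracted before exponentiation).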
template<typename T>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
{
    using std::exp;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { 2, 4 };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());

    // A scale of 1/256 with zero offset maps the quantized range onto [0, 1),
    // which covers the range of softmax outputs.
    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);

    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Each row is independently softmax'd.
    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0.f, 1.f, 0.f, 0.f,
            .5f, 0.f, 0.f, 0.f,
        })));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

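    // Reference values, computed with the same numerically stable formulation
    // the backends use:
    //   softmax(x_i) = exp(beta * (x_i - max_j x_j)) / sum_k exp(beta * (x_k - max_j x_j))
    // Subtracting the row maximum before exponentiation does not change the
    // result (the extra factor cancels between numerator and denominator) but
    // avoids overflow for large inputs. Row 0 has max 1.0, row 1 has max 0.5.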
    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
        exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
        exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];

    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
            x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
        })));

    return ret;
}

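// Runs the same Softmax workload on the factory under test and on a reference
// factory, over a random 20x30 input, and returns both results so that the
// caller can compare them (ret.output holds the result under test,
// ret.outputExpected the reference result).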
template<typename T>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float beta)
{
    const unsigned int batchSize = 20;
    const unsigned int channels = 30;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channels };

    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
    float qScale = 1.f / 256.f;
    int qOffset = 0;
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // A fixed seed keeps the random input, and hence the test, reproducible.
    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SoftmaxQueueDescriptor data;
    data.m_Parameters.m_Beta = beta;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // The reference workload reuses the same descriptor and workload info,
    // rebound to the reference factory's tensor handles.
    armnn::SoftmaxQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);

    workloadFactory.Finalize();
    workload->Execute();
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());

    return ret;
}
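
// Example usage from a test case. This is an illustrative sketch, not code
// from this file: RefWorkloadFactory and the BOOST_TEST wiring live elsewhere
// in the tree (CompareTensors comes from TensorHelpers.hpp, included above).
//
//     armnn::RefWorkloadFactory factory;
//     auto result = SimpleSoftmaxTestImpl<float>(factory, 1.0f /* beta */);
//     BOOST_TEST(CompareTensors(result.output, result.outputExpected));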