IVGCVSW-4246 Clean build of LayerTests with -Wextra
src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "LstmTestImpl.hpp"
7
8 #include <QuantizeHelper.hpp>
9
10 #include <armnn/ArmNN.hpp>
11
12 #include <backendsCommon/CpuTensorHandle.hpp>
13
14 #include <backendsCommon/test/TensorCopyUtils.hpp>
15 #include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17 #include <reference/workloads/Decoders.hpp>
18 #include <reference/workloads/Encoders.hpp>
19 #include <reference/workloads/LstmUtils.hpp>
20
21 #include <test/TensorHelpers.hpp>
22
23 #include <boost/multi_array.hpp>
24
25 namespace
26 {
27
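// The LstmUtils* helpers below exercise the reference LstmUtils functions through
// Encoder/Decoder objects: each one runs the operation in place, compares the result
// against an expected tensor, and then writes through the encoder once more to check
// that its iterator has been reset to the start of the buffer.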
28 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
29 void LstmUtilsVectorBatchVectorAddTestImpl(
30         boost::multi_array<float, 1>& vec,
31         boost::multi_array<float, 2>& batchVec,
32         uint32_t vSize,
33         uint32_t nBatch,
34         boost::multi_array<float, 2>& expectedOutput )
35 {
36     float qScale = 0.0f;
37     int32_t qOffset = 0;
38     armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset);
39
40     // Make encoder and decoder
41     std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
42     std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
43     std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
44
45     VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
46
47     // check shape and compare values
48     BOOST_TEST(CompareTensors(batchVec, expectedOutput));
49
50     // check if iterator is back at start position
51     batchVecEncoder->Set(1.0f);
52     BOOST_TEST(batchVec[0][0] == 1.0f);
53 }
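// Illustrative only: a test entry point in this file is expected to build the inputs
// and the expected result as boost::multi_arrays and forward them to the helper above,
// along these lines (sizes taken from a hypothetical 4-element, 2-batch case):
//
//     boost::multi_array<float, 1> vec(boost::extents[4]);
//     boost::multi_array<float, 2> batchVec(boost::extents[2][4]);
//     boost::multi_array<float, 2> expected(boost::extents[2][4]);
//     // ... fill vec, batchVec and expected ...
//     LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vec, batchVec, 4, 2, expected);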
54
55 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
56 void LstmUtilsZeroVectorTestImpl(
57         boost::multi_array<float, 1>& input,
58         uint32_t vSize,
59         boost::multi_array<float, 1>& expectedOutput)
60 {
61     float qScale = 0.0f;
62     int32_t qOffset = 0;
63
64     armnn::TensorInfo tensorInfo({vSize}, ArmnnType, qScale, qOffset);
65
66     // Make encoder for input
67     std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
68
69     // call ZeroVector
70     ZeroVector(*outputEncoder, vSize);
71
72     // check shape and compare values
73     BOOST_TEST(CompareTensors(input, expectedOutput));
74
75     // check if iterator is back at start position
76     outputEncoder->Set(1.0f);
77     BOOST_TEST(input[0] == 1.0f);
78
79 }
80
81 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
82 void LstmUtilsMeanStddevNormalizationTestImpl(
83         boost::multi_array<float, 2>& input,
84         uint32_t vSize,
85         uint32_t nBatch,
86         boost::multi_array<float, 2>& expectedOutput)
87 {
88     float qScale = 0.0f;
89     int32_t qOffset = 0;
90     armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset);
91
92     // Make encoder and decoder for input
93     std::unique_ptr<armnn::Decoder<float>> inputDecoder = armnn::MakeDecoder<float>(tensorInfo, input.data());
94     std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
95
96     MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
97
98     // check shape and compare values
99     BOOST_TEST(CompareTensors(input, expectedOutput));
100
101     // check if iterator is back at start position
102     outputEncoder->Set(1.0f);
103     BOOST_TEST(input[0][0] == 1.0f);
104 }
105
106 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
107 void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
108         boost::multi_array<float, 1>& vec,
109         boost::multi_array<float, 2>& batchVec,
110         uint32_t vSize,
111         uint32_t nBatch,
112         boost::multi_array<float, 2>& expectedOutput)
113 {
114     float qScale = 0.0f;
115     int32_t qOffset = 0;
116     armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset);
117
118     // Make encoder and decoder
119     std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
120     std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
121     std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
122
123     VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
124
125     // check shape and compare values
126     BOOST_TEST(CompareTensors(batchVec, expectedOutput));
127
128     // check if iterator is back at start position
129     batchVecEncoder->Set(1.0f);
130     BOOST_TEST(batchVec[0][0] == 1.0f);
131 }
132
133 // Lstm Layer tests:
134 // *********************************** //
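// Each test below drives a full LSTM workload: it sets up TensorInfos for the three
// inputs (input, output state in, cell state in) and the four outputs (scratch buffer,
// output state out, cell state out, output), loads the constant weights and biases into
// ScopedCpuTensorHandles referenced by the LstmQueueDescriptor, creates the workload
// through IWorkloadFactory::CreateLstm, executes it, and copies the result into a
// LayerTestResult whose outputExpected field holds the reference values.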
135 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
136 LayerTestResult<T, 2>
137 LstmNoCifgNoPeepholeNoProjectionTestImpl(
138         armnn::IWorkloadFactory& workloadFactory,
139         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
140         const boost::multi_array<T, 2>& input,
141         const boost::multi_array<T, 2>& outputExpected,
142         float qScale = 0.0f,
143         int32_t qOffset = 0,
144         armnn::DataType constantDataType = armnn::DataType::Float32)
145 {
146     boost::ignore_unused(memoryManager);
147     unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
148     unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
149     unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
150     // numUnits (the cell size) equals outputSize when there is no projection layer.
151     unsigned int numUnits = outputSize;
152
153     armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, ArmnnType, qScale, qOffset);
154     armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
155     armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
156
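    // With CIFG disabled the scratch buffer holds all four gate buffers: [batchSize, numUnits * 4].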
157     armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
158     armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
159     armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
160     armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
161
162     LayerTestResult<T, 2> ret(outputTensorInfo);
163
164     std::vector<float> inputVector;
165     inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
166     auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
167
168     std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
169     auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
170
171     std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
172     auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
173
174     std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
175     auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
176
177     std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
178     auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
179
180     std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
181     auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
182
183     std::vector<float> outputVector;
184     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
185     ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
186
187     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
188     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
189             workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
190     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
191             workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
192
193     std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
194     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
195             workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
196     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
197             workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
198     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
199
200
201     armnn::LstmQueueDescriptor data;
202     armnn::WorkloadInfo info;
203
204     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
205     AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
206     AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
207
208     AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
209     AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
210     AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
211     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
212
213     armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
214     armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
215     armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);
216
217     auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
218                                                                   -0.34550029f, 0.04266912f, -0.15680569f,
219                                                                   -0.34856534f, 0.43890524f});
220
221     auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
222                                                                    -0.31343272f, -0.40032279f, 0.44781327f,
223                                                                    0.01387155f, -0.35593212f});
224
225     auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
226                                                                  -0.20583314f, 0.44344562f, 0.22077113f,
227                                                                  -0.29909778f});
228
229     auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
230                                                                    0.40525138f, 0.44272184f, 0.03897077f,
231                                                                    -0.1556896f, 0.19487578f});
232
233     auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
234                                                                        -0.35746509f, 0.28902304f, 0.08183324f,
235                                                                        -0.16555229f, 0.02286911f, -0.13566875f,
236                                                                        0.03034258f, 0.48091322f, -0.12528998f,
237                                                                        0.24077177f, -0.51332325f, -0.33502164f,
238                                                                        0.10629296f});
239
240     auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
241                                                                         0.2112639f, 0.27654213f, 0.20864892f,
242                                                                         -0.07646349f, 0.45877004f, 0.00141793f,
243                                                                         -0.14609534f, 0.36447752f, 0.09196436f,
244                                                                         0.28053468f, 0.01560611f, -0.20127171f,
245                                                                         -0.01140004f});
246
247     auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
248                                                                       0.26320225f, 0.05695659f, -0.00123841f,
249                                                                       -0.4744786f, -0.35869038f, -0.06418842f,
250                                                                       -0.13502428f, -0.501764f, 0.22830659f,
251                                                                       -0.46367589f, 0.26016325f, -0.03894562f,
252                                                                       -0.16368064f});
253
254     auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
255                                                                         0.09215671f, 0.24107647f, -0.39835793f,
256                                                                         0.18212086f, 0.01301402f, 0.48572797f,
257                                                                         -0.50656658f, 0.20047462f, -0.20607421f,
258                                                                         -0.51818722f, -0.15390486f, 0.0468148f,
259                                                                         0.39922136f});
260
261     auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
262
263     auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
264
265     auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});
266
267     auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
268
269     auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
270
271     armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
272     armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
273     armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
274     armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
275     armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
276     armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
277     armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
278     armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
279     armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
280     armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
281     armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
282     armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
283     armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
284
285     AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
286     AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
287     AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
288     AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
289     AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
290     AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
291     AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
292     AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
293     AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
294     AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
295     AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
296     AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
297     AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
298
299     data.m_InputToInputWeights = &inputToInputWeightsTensor;
300     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
301     data.m_InputToCellWeights = &inputToCellWeightsTensor;
302     data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
303     data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
304     data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
305     data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
306     data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
307     data.m_InputGateBias = &inputGateBiasTensor;
308     data.m_ForgetGateBias = &forgetGateBiasTensor;
309     data.m_CellBias = &cellBiasTensor;
310     data.m_OutputGateBias = &outputGateBiasTensor;
311
312     // Test configuration: CIFG, peephole and projection are all disabled; activation function 4 selects TanH.
313     data.m_Parameters.m_ActivationFunc = 4;
314     data.m_Parameters.m_CifgEnabled = false;
315     data.m_Parameters.m_PeepholeEnabled = false;
316     data.m_Parameters.m_ProjectionEnabled = false;
317
318     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
319     inputHandle->Allocate();
320     outputStateInHandle->Allocate();
321     cellStateInHandle->Allocate();
322
323     scratchHandle->Allocate();
324     outputStateOutHandle->Allocate();
325     cellStateOutHandle->Allocate();
326     outputHandle->Allocate();
327
328     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
329     CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
330     CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
331
332     workload->Execute();
333
334     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
335
336     return ret;
337 }
338
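// As above, but with peephole connections and the projection layer enabled. The shapes are
// fixed for this test: 2 batches, 5 inputs, 20 cell units and 16 outputs, so the projection
// weights ({outputSize, numUnits}) map the 20-unit cell output down to the 16-element output state.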
339 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
340 LayerTestResult<T, 2>
341 LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
342                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
343                                                   const boost::multi_array<T, 2>& input,
344                                                   const boost::multi_array<T, 2>& outputExpected,
345                                                   float qScale = 0.0f,
346                                                   int32_t qOffset = 0,
347                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
348 {
349     boost::ignore_unused(memoryManager);
350     unsigned int batchSize = 2;
351     unsigned int outputSize = 16;
352     unsigned int inputSize = 5;
353     unsigned int numUnits = 20;
354
355     armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, ArmnnType, qScale, qOffset);
356     armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
357     armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
358
359     // Scratch buffer size without CIFG [batchSize, numUnits * 4]
360     armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
361     armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
362     armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
363     armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
364
365     LayerTestResult<T, 2> ret(outputTensorInfo);
366
367     std::vector<float> inputVector;
368     inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
369     auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
370
371     std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
372     auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
373
374     std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
375     auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
376
377     std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
378     auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
379
380     std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
381     auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
382
383     std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
384     auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
385
386     std::vector<float> outputVector;
387     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
388     ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
389
390     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
391     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
392             workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
393     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
394             workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
395
396     std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
397     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
398             workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
399     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
400             workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
401     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
402
403     armnn::LstmQueueDescriptor data;
404     armnn::WorkloadInfo info;
405
406     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
407     AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
408     AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
409
410     AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
411     AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
412     AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
413     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
414
415     armnn::TensorInfo tensorInfo16({outputSize}, constantDataType, qScale, qOffset);
416     armnn::TensorInfo tensorInfo20({numUnits}, constantDataType, qScale, qOffset);
417     armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
418     armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset);
419     armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset);
420
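    // Constant tensor shapes: tensorInfo20x5 holds the {numUnits, inputSize} input-to-gate
    // weights, tensorInfo20x16 the {numUnits, outputSize} recurrent weights, tensorInfo16x20
    // the {outputSize, numUnits} projection weights, tensorInfo20 the {numUnits} gate biases,
    // and tensorInfo16 the {outputSize} output-side constants.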
421     auto inputToInputWeights =
422             MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f,  0.046905167f,-0.014657677f,-0.03149463f,
423                                                   0.09171803f, 0.14647801f,0.10797193f,   -0.0057968358f,0.0019193048f,
424                                                   -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
425                                                   -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
426                                                   -0.008045952f,0.015478081f, 0.055217247f,  0.038719587f, 0.044153627f,
427                                                   -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
428                                                   -0.1671009f,   -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
429                                                   0.25005487f, -0.22790983f, 0.009855087f,  -0.028140958f, -0.11200698f,
430                                                   0.11295408f, -0.0035217577f, 0.054485075f,  0.05184695f, 0.064711206f,
431                                                   0.10989193f,   0.11674786f,  0.03490607f, 0.07727357f, 0.11390585f,
432                                                   -0.1863375f,  -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
433                                                   0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f,    0.14545603f,
434                                                   -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
435                                                   -0.042484224f, -0.11827596f, -0.09171104f,  -0.10808628f,-0.16327988f,
436                                                   -0.2273378f,   -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
437                                                   0.0038534778f, 0.054764505f,   0.089753784f, 0.06947234f, 0.08014476f,
438                                                   -0.04544234f, -0.0497073f,-0.07135631f,  -0.048929106f,-0.004042012f,
439                                                   -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
440                                                   -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
441                                                   -0.39292613f,  -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
442             });
443
444     auto inputToForgetWeights =
445             MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
446                                                    -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
447                                                    -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
448                                                    0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
449                                                    0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
450                                                    -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
451                                                    -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
452                                                    0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
453                                                    0.06958324f,    0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
454                                                    0.12784666f,    0.07077897f,  0.025725935f, 0.04165009f,0.07241905f,
455                                                    0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
456                                                    -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
457                                                    0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
458                                                    -0.08402166f,-0.01901462f,  -0.044678304f,-0.07720565f,0.014350063f,
459                                                    -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
460                                                    0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
461                                                    0.036881298f,   0.02913376f,  0.03420159f,0.05448447f,-0.054523353f,
462                                                    0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
463                                                    -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
464                                                    0.0001771948f,  -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
465             });
466
467     auto inputToCellWeights =
468             MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f,   -0.09549462f,   -0.032418985f,  -0.06454633f,
469                                                   -0.043528453f,  0.043018587f,   -0.049152344f,  -0.12418144f,
470                                                   -0.078985475f,  -0.07596889f,   0.019484362f,   -0.11434962f,
471                                                   -0.0074034138f, -0.06314844f,   -0.092981495f,  0.0062155537f,
472                                                   -0.025034338f,  -0.0028890965f, 0.048929527f,   0.06235075f,
473                                                   0.10665918f,    -0.032036792f,  -0.08505916f,   -0.10843358f,
474                                                   -0.13002433f,   -0.036816437f,  -0.02130134f,   -0.016518239f,
475                                                   0.0047691227f,  -0.0025825808f, 0.066017866f,   0.029991534f,
476                                                   -0.10652836f,   -0.1037554f,    -0.13056071f,   -0.03266643f,
477                                                   -0.033702414f,  -0.006473424f,  -0.04611692f,   0.014419339f,
478                                                   -0.025174323f,  0.0396852f,     0.081777506f,   0.06157468f,
479                                                   0.10210095f,    -0.009658194f,  0.046511717f,   0.03603906f,
480                                                   0.0069369148f,  0.015960095f,   -0.06507666f,   0.09551598f,
481                                                   0.053568836f,   0.06408714f,    0.12835667f,    -0.008714329f,
482                                                   -0.20211966f,   -0.12093674f,   0.029450472f,   0.2849013f,
483                                                   -0.029227901f,  0.1164364f,     -0.08560263f,   0.09941786f,
484                                                   -0.036999565f,  -0.028842626f,  -0.0033637602f, -0.017012902f,
485                                                   -0.09720865f,   -0.11193351f,   -0.029155117f,  -0.017936034f,
486                                                   -0.009768936f,  -0.04223324f,   -0.036159635f,  0.06505112f,
487                                                   -0.021742892f,  -0.023377212f,  -0.07221364f,   -0.06430552f,
488                                                   0.05453865f,    0.091149814f,   0.06387331f,    0.007518393f,
489                                                   0.055960953f,   0.069779344f,   0.046411168f,   0.10509911f,
490                                                   0.07463894f,    0.0075130584f,  0.012850982f,   0.04555431f,
491                                                   0.056955688f,   0.06555285f,    0.050801456f,   -0.009862683f,
492                                                   0.00826772f,    -0.026555609f,  -0.0073611983f, -0.0014897042f
493             });
494
495     auto inputToOutputWeights =
496             MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f,   -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
497                                                   -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f,  -0.15093534f,
498                                                   0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
499                                                   -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
500                                                   -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
501                                                   0.10124236f,  0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
502                                                   -0.027833903f, 0.029774971f,  0.1130802f, 0.09218906f, 0.09506135f,
503                                                   -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
504                                                   -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
505                                                   -0.11366429f,  0.035777505f,  0.13568819f, 0.052451383f,0.050649304f,
506                                                   0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
507                                                   0.04974699f, 0.014160473f,  0.06973932f,    0.04964942f, 0.033364646f,
508                                                   0.08190124f,   0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
509                                                   -0.078907564f,-0.06707616f,  -0.11844508f, -0.09986688f,-0.07509403f,
510                                                   0.06263226f,   0.14925587f,   0.20188436f, 0.12098451f,0.14639415f,
511                                                   0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
512                                                   -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f,  0.021544158f,
513                                                   0.08949725f,  0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
514                                                   -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
515                                                   -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
516             });
517
518     auto inputGateBias =
519             MakeTensor<float, 1>(tensorInfo20, {0.02234832f,  0.14757581f,   0.18176508f,  0.10380666f,  0.053110216f,
520                                                 -0.06928846f, -0.13942584f,  -0.11816189f, 0.19483899f,  0.03652339f,
521                                                 -0.10250295f, 0.036714908f,  -0.18426876f, 0.036065217f, 0.21810818f,
522                                                 0.02383196f,  -0.043370757f, 0.08690144f,  -0.04444982f, 0.00030581196f
523             });
524
525     auto forgetGateBias =
526             MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
527                                                 0.11098921f,  0.15378423f,   0.09263801f,  0.09790885f,
528                                                 0.09508917f,  0.061199076f,  0.07665568f,  -0.015443159f,
529                                                 -0.03499149f, 0.046190713f,  0.08895977f,  0.10899629f,
530                                                 0.40694186f,  0.06030037f,   0.012413437f, -0.06108739f
531             });
532
533     auto cellBias =
534             MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f,   0.033463873f,
535                                                 -0.1483596f,   -0.10639995f,  -0.091433935f, 0.058573797f,
536                                                 -0.06809782f,  -0.07889636f,  -0.043246906f, -0.09829136f,
537                                                 -0.4279842f,   0.034901652f,  0.18797937f,   0.0075234566f,
538                                                 0.016178843f,  0.1749513f,    0.13975595f,   0.92058027f
539             });
540
541     auto outputGateBias =
542             MakeTensor<float, 1>(tensorInfo20, {0.046159424f,  -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
543                                                 0.35373217f,   -0.018957434f,  0.008907322f, -0.0762701f, 0.12018895f,
544                                                 0.04216877f,   0.0022856654f,  0.040952638f,  0.3147856f,  0.08225149f,
545                                                 -0.057416286f, -0.14995944f,   -0.008040261f, 0.13208859f, 0.029760877f
546             });
547
548     auto recurrentToInputWeights =
549             MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f,   -0.078856036f,   0.10672688f,    0.029162422f,
550                                                    -0.11585556f,    0.02557986f,     -0.13446963f,   -0.035785314f,
551                                                    -0.01244275f,    0.025961924f,    -0.02337298f,   -0.044228926f,
552                                                    -0.055839065f,   -0.046598054f,   -0.010546039f,  -0.06900766f,
553                                                    0.027239809f,    0.022582639f,    -0.013296484f,  -0.05459212f,
554                                                    0.08981f,        -0.045407712f,   0.08682226f,    -0.06867011f,
555                                                    -0.14390695f,    -0.02916037f,    0.000996957f,   0.091420636f,
556                                                    0.14283475f,     -0.07390571f,    -0.06402044f,   0.062524505f,
557                                                    -0.093129106f,   0.04860203f,     -0.08364217f,   -0.08119002f,
558                                                    0.009352075f,    0.22920375f,     0.0016303885f,  0.11583097f,
559                                                    -0.13732095f,    0.012405723f,    -0.07551853f,   0.06343048f,
560                                                    0.12162708f,     -0.031923793f,   -0.014335606f,  0.01790974f,
561                                                    -0.10650317f,    -0.0724401f,     0.08554849f,    -0.05727212f,
562                                                    0.06556731f,     -0.042729504f,   -0.043227166f,  0.011683251f,
563                                                    -0.013082158f,   -0.029302018f,   -0.010899579f,  -0.062036745f,
564                                                    -0.022509435f,   -0.00964907f,    -0.01567329f,   0.04260106f,
565                                                    -0.07787477f,    -0.11576462f,    0.017356863f,   0.048673786f,
566                                                    -0.017577527f,   -0.05527947f,    -0.082487635f,  -0.040137455f,
567                                                    -0.10820036f,    -0.04666372f,    0.022746278f,   -0.07851417f,
568                                                    0.01068115f,     0.032956902f,    0.022433773f,   0.0026891115f,
569                                                    0.08944216f,     -0.0685835f,     0.010513544f,   0.07228705f,
570                                                    0.02032331f,     -0.059686817f,   -0.0005566496f, -0.086984694f,
571                                                    0.040414046f,    -0.1380399f,     0.094208956f,   -0.05722982f,
572                                                    0.012092817f,    -0.04989123f,    -0.086576f,     -0.003399834f,
573                                                    -0.04696032f,    -0.045747425f,   0.10091314f,    0.048676282f,
574                                                    -0.029037097f,   0.031399418f,    -0.0040285117f, 0.047237843f,
575                                                    0.09504992f,     0.041799378f,    -0.049185462f,  -0.031518843f,
576                                                    -0.10516937f,    0.026374253f,    0.10058866f,    -0.0033195973f,
577                                                    -0.041975245f,   0.0073591834f,   0.0033782164f,  -0.004325073f,
578                                                    -0.10167381f,    0.042500053f,    -0.01447153f,   0.06464186f,
579                                                    -0.017142897f,   0.03312627f,     0.009205989f,   0.024138335f,
580                                                    -0.011337001f,   0.035530265f,    -0.010912711f,  0.0706555f,
581                                                    -0.005894094f,   0.051841937f,    -0.1401738f,    -0.02351249f,
582                                                    0.0365468f,      0.07590991f,     0.08838724f,    0.021681072f,
583                                                    -0.10086113f,    0.019608743f,    -0.06195883f,   0.077335775f,
584                                                    0.023646897f,    -0.095322326f,   0.02233014f,    0.09756986f,
585                                                    -0.048691444f,   -0.009579111f,   0.07595467f,    0.11480546f,
586                                                    -0.09801813f,    0.019894179f,    0.08502348f,    0.004032281f,
587                                                    0.037211012f,    0.068537936f,    -0.048005626f,  -0.091520436f,
588                                                    -0.028379958f,   -0.01556313f,    0.06554592f,    -0.045599163f,
589                                                    -0.01672207f,    -0.020169014f,   -0.011877351f,  -0.20212261f,
590                                                    0.010889619f,    0.0047078193f,   0.038385306f,   0.08540671f,
591                                                    -0.017140968f,   -0.0035865551f,  0.016678626f,   0.005633034f,
592                                                    0.015963363f,    0.00871737f,     0.060130805f,   0.028611384f,
593                                                    0.10109069f,     -0.015060172f,   -0.07894427f,   0.06401885f,
594                                                    0.011584063f,    -0.024466386f,   0.0047652307f,  -0.09041358f,
595                                                    0.030737216f,    -0.0046374933f,  0.14215417f,    -0.11823516f,
596                                                    0.019899689f,    0.006106124f,    -0.027092824f,  0.0786356f,
597                                                    0.05052217f,     -0.058925f,      -0.011402121f,  -0.024987547f,
598                                                    -0.0013661642f,  -0.06832946f,    -0.015667673f,  -0.1083353f,
599                                                    -0.00096863037f, -0.06988685f,    -0.053350925f,  -0.027275559f,
600                                                    -0.033664223f,   -0.07978348f,    -0.025200296f,  -0.017207067f,
601                                                    -0.058403496f,   -0.055697463f,   0.005798788f,   0.12965427f,
602                                                    -0.062582195f,   0.0013350133f,   -0.10482091f,   0.0379771f,
603                                                    0.072521195f,    -0.0029455067f,  -0.13797039f,   -0.03628521f,
604                                                    0.013806405f,    -0.017858358f,   -0.01008298f,   -0.07700066f,
605                                                    -0.017081132f,   0.019358726f,    0.0027079724f,  0.004635139f,
606                                                    0.062634714f,    -0.02338735f,    -0.039547626f,  -0.02050681f,
607                                                    0.03385117f,     -0.083611414f,   0.002862572f,   -0.09421313f,
608                                                    0.058618143f,    -0.08598433f,    0.00972939f,    0.023867095f,
609                                                    -0.053934585f,   -0.023203006f,   0.07452513f,    -0.048767887f,
610                                                    -0.07314807f,    -0.056307215f,   -0.10433547f,   -0.06440842f,
611                                                    0.04328182f,     0.04389765f,     -0.020006588f,  -0.09076438f,
612                                                    -0.11652589f,    -0.021705797f,   0.03345259f,    -0.010329105f,
613                                                    -0.025767034f,   0.013057034f,    -0.07316461f,   -0.10145612f,
614                                                    0.06358255f,     0.18531723f,     0.07759293f,    0.12006465f,
615                                                    0.1305557f,      0.058638252f,    -0.03393652f,   0.09622831f,
616                                                    -0.16253184f,    -2.4580743e-06f, 0.079869635f,   -0.070196845f,
617                                                    -0.005644518f,   0.06857898f,     -0.12598175f,   -0.035084512f,
618                                                    0.03156317f,     -0.12794146f,    -0.031963028f,  0.04692781f,
619                                                    0.030070418f,    0.0071660685f,   -0.095516115f,  -0.004643372f,
620                                                    0.040170413f,    -0.062104587f,   -0.0037324072f, 0.0554317f,
621                                                    0.08184801f,     -0.019164372f,   0.06791302f,    0.034257166f,
622                                                    -0.10307039f,    0.021943003f,    0.046745934f,   0.0790918f,
623                                                    -0.0265588f,     -0.007824208f,   0.042546265f,   -0.00977924f,
624                                                    -0.0002440307f,  -0.017384544f,   -0.017990116f,  0.12252321f,
625                                                    -0.014512694f,   -0.08251313f,    0.08861942f,    0.13589665f,
626                                                    0.026351685f,    0.012641483f,    0.07466548f,    0.044301085f,
627                                                    -0.045414884f,   -0.051112458f,   0.03444247f,    -0.08502782f,
628                                                    -0.04106223f,    -0.028126027f,   0.028473156f,   0.10467447f
629             });
630
631     auto recurrentToForgetWeights =
632             MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f,  -0.026057621f,  -0.068447545f,   -0.022581743f,
633                                                    0.14811787f,    0.10826372f,    0.09471067f,     0.03987225f,
634                                                    -0.0039523416f, 0.00030638507f, 0.053185795f,    0.10572994f,
635                                                    0.08414449f,    -0.022036452f,  -0.00066928595f, -0.09203576f,
636                                                    0.032950465f,   -0.10985798f,   -0.023809856f,   0.0021431844f,
637                                                    -0.02196096f,   -0.00326074f,   0.00058621005f,  -0.074678116f,
638                                                    -0.06193199f,   0.055729095f,   0.03736828f,     0.020123724f,
639                                                    0.061878487f,   -0.04729229f,   0.034919553f,    -0.07585433f,
640                                                    -0.04421272f,   -0.044019096f,  0.085488975f,    0.04058006f,
641                                                    -0.06890133f,   -0.030951202f,  -0.024628663f,   -0.07672815f,
642                                                    0.034293607f,   0.08556707f,    -0.05293577f,    -0.033561368f,
643                                                    -0.04899627f,   0.0241671f,     0.015736353f,    -0.095442444f,
644                                                    -0.029564252f,  0.016493602f,   -0.035026584f,   0.022337519f,
645                                                    -0.026871363f,  0.004780428f,   0.0077918363f,   -0.03601621f,
646                                                    0.016435321f,   -0.03263031f,   -0.09543275f,    -0.047392778f,
647                                                    0.013454138f,   0.028934088f,   0.01685226f,     -0.086110644f,
648                                                    -0.046250615f,  -0.01847454f,   0.047608484f,    0.07339695f,
649                                                    0.034546845f,   -0.04881143f,   0.009128804f,    -0.08802852f,
650                                                    0.03761666f,    0.008096139f,   -0.014454086f,   0.014361001f,
651                                                    -0.023502491f,  -0.0011840804f, -0.07607001f,    0.001856849f,
652                                                    -0.06509276f,   -0.006021153f,  -0.08570962f,    -0.1451793f,
653                                                    0.060212336f,   0.055259194f,   0.06974018f,     0.049454916f,
654                                                    -0.027794661f,  -0.08077226f,   -0.016179763f,   0.1169753f,
655                                                    0.17213494f,    -0.0056326236f, -0.053934924f,   -0.0124349f,
656                                                    -0.11520337f,   0.05409887f,    0.088759385f,    0.0019655675f,
657                                                    0.0042065294f,  0.03881498f,    0.019844765f,    0.041858196f,
658                                                    -0.05695512f,   0.047233116f,   0.038937137f,    -0.06542224f,
659                                                    0.014429736f,   -0.09719407f,   0.13908425f,     -0.05379757f,
660                                                    0.012321099f,   0.082840554f,   -0.029899208f,   0.044217527f,
661                                                    0.059855383f,   0.07711018f,    -0.045319796f,   0.0948846f,
662                                                    -0.011724666f,  -0.0033288454f, -0.033542685f,   -0.04764985f,
663                                                    -0.13873616f,   0.040668588f,   0.034832682f,    -0.015319203f,
664                                                    -0.018715994f,  0.046002675f,   0.0599172f,      -0.043107376f,
665                                                    0.0294216f,     -0.002314414f,  -0.022424703f,   0.0030315618f,
666                                                    0.0014641669f,  0.0029166266f,  -0.11878115f,    0.013738511f,
667                                                    0.12375372f,    -0.0006038222f, 0.029104086f,    0.087442465f,
668                                                    0.052958444f,   0.07558703f,    0.04817258f,     0.044462286f,
669                                                    -0.015213451f,  -0.08783778f,   -0.0561384f,     -0.003008196f,
670                                                    0.047060397f,   -0.002058388f,  0.03429439f,     -0.018839769f,
671                                                    0.024734668f,   0.024614193f,   -0.042046934f,   0.09597743f,
672                                                    -0.0043254104f, 0.04320769f,    0.0064070094f,   -0.0019131786f,
673                                                    -0.02558259f,   -0.022822596f,  -0.023273505f,   -0.02464396f,
674                                                    -0.10991725f,   -0.006240552f,  0.0074488563f,   0.024044557f,
675                                                    0.04383914f,    -0.046476185f,  0.028658995f,    0.060410924f,
676                                                    0.050786525f,   0.009452605f,   -0.0073054377f,  -0.024810238f,
677                                                    0.0052906186f,  0.0066939713f,  -0.0020913032f,  0.014515517f,
678                                                    0.015898481f,   0.021362653f,   -0.030262267f,   0.016587038f,
679                                                    -0.011442813f,  0.041154444f,   -0.007631438f,   -0.03423484f,
680                                                    -0.010977775f,  0.036152758f,   0.0066366293f,   0.11915515f,
681                                                    0.02318443f,    -0.041350313f,  0.021485701f,    -0.10906167f,
682                                                    -0.028218046f,  -0.00954771f,   0.020531068f,    -0.11995105f,
683                                                    -0.03672871f,   0.024019798f,   0.014255957f,    -0.05221243f,
684                                                    -0.00661567f,   -0.04630967f,   0.033188973f,    0.10107534f,
685                                                    -0.014027541f,  0.030796422f,   -0.10270911f,    -0.035999842f,
686                                                    0.15443139f,    0.07684145f,    0.036571592f,    -0.035900835f,
687                                                    -0.0034699554f, 0.06209149f,    0.015920248f,    -0.031122351f,
688                                                    -0.03858649f,   0.01849943f,    0.13872518f,     0.01503974f,
689                                                    0.069941424f,   -0.06948533f,   -0.0088794185f,  0.061282158f,
690                                                    -0.047401894f,  0.03100163f,    -0.041533746f,   -0.10430945f,
691                                                    0.044574402f,   -0.01425562f,   -0.024290353f,   0.034563623f,
692                                                    0.05866852f,    0.023947537f,   -0.09445152f,    0.035450947f,
693                                                    0.02247216f,    -0.0042998926f, 0.061146557f,    -0.10250651f,
694                                                    0.020881841f,   -0.06747029f,   0.10062043f,     -0.0023941975f,
695                                                    0.03532124f,    -0.016341697f,  0.09685456f,     -0.016764693f,
696                                                    0.051808182f,   0.05875331f,    -0.04536488f,    0.001626336f,
697                                                    -0.028892258f,  -0.01048663f,   -0.009793449f,   -0.017093895f,
698                                                    0.010987891f,   0.02357273f,    -0.00010856845f, 0.0099760275f,
699                                                    -0.001845119f,  -0.03551521f,   0.0018358806f,   0.05763657f,
700                                                    -0.01769146f,   0.040995963f,   0.02235177f,     -0.060430344f,
701                                                    0.11475477f,    -0.023854522f,  0.10071741f,     0.0686208f,
702                                                    -0.014250481f,  0.034261297f,   0.047418304f,    0.08562733f,
703                                                    -0.030519066f,  0.0060542435f,  0.014653856f,    -0.038836084f,
704                                                    0.04096551f,    0.032249358f,   -0.08355519f,    -0.026823482f,
705                                                    0.056386515f,   -0.010401743f,  -0.028396193f,   0.08507674f,
706                                                    0.014410365f,   0.020995233f,   0.17040324f,     0.11511526f,
707                                                    0.02459721f,    0.0066619175f,  0.025853224f,    -0.023133837f,
708                                                    -0.081302024f,  0.017264642f,   -0.009585969f,   0.09491168f,
709                                                    -0.051313367f,  0.054532815f,   -0.014298593f,   0.10657464f,
710                                                    0.007076659f,   0.10964551f,    0.0409152f,      0.008275321f,
711                                                    -0.07283536f,   0.07937492f,    0.04192024f,     -0.1075027f
712             });
713
714     auto recurrentToCellWeights =
715             MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f,   0.018592842f,   0.0056175636f,  -0.06253426f,
716                                                    0.055647098f,    -0.05713207f,   -0.05626563f,   0.005559383f,
717                                                    0.03375411f,     -0.025757805f,  -0.088049285f,  0.06017052f,
718                                                    -0.06570978f,    0.007384076f,   0.035123326f,   -0.07920549f,
719                                                    0.053676967f,    0.044480428f,   -0.07663568f,   0.0071805613f,
720                                                    0.08089997f,     0.05143358f,    0.038261272f,   0.03339287f,
721                                                    -0.027673481f,   0.044746667f,   0.028349208f,   0.020090483f,
722                                                    -0.019443132f,   -0.030755889f,  -0.0040000007f, 0.04465846f,
723                                                    -0.021585021f,   0.0031670958f,  0.0053199246f,  -0.056117613f,
724                                                    -0.10893326f,    0.076739706f,   -0.08509834f,   -0.027997585f,
725                                                    0.037871376f,    0.01449768f,    -0.09002357f,   -0.06111149f,
726                                                    -0.046195522f,   0.0422062f,     -0.005683705f,  -0.1253618f,
727                                                    -0.012925729f,   -0.04890792f,   0.06985068f,    0.037654128f,
728                                                    0.03398274f,     -0.004781977f,  0.007032333f,   -0.031787455f,
729                                                    0.010868644f,    -0.031489216f,  0.09525667f,    0.013939797f,
730                                                    0.0058680447f,   0.0167067f,     0.02668468f,    -0.04797466f,
731                                                    -0.048885044f,   -0.12722108f,   0.035304096f,   0.06554885f,
732                                                    0.00972396f,     -0.039238118f,  -0.05159735f,   -0.11329045f,
733                                                    0.1613692f,      -0.03750952f,   0.06529313f,    -0.071974665f,
734                                                    -0.11769596f,    0.015524369f,   -0.0013754242f, -0.12446318f,
735                                                    0.02786344f,     -0.014179351f,  0.005264273f,   0.14376344f,
736                                                    0.015983658f,    0.03406988f,    -0.06939408f,   0.040699873f,
737                                                    0.02111075f,     0.09669095f,    0.041345075f,   -0.08316494f,
738                                                    -0.07684199f,    -0.045768797f,  0.032298047f,   -0.041805092f,
739                                                    0.0119405f,      0.0061010392f,  0.12652606f,    0.0064572375f,
740                                                    -0.024950314f,   0.11574242f,    0.04508852f,    -0.04335324f,
741                                                    0.06760663f,     -0.027437469f,  0.07216407f,    0.06977076f,
742                                                    -0.05438599f,    0.034033038f,   -0.028602652f,  0.05346137f,
743                                                    0.043184172f,    -0.037189785f,  0.10420091f,    0.00882477f,
744                                                    -0.054019816f,   -0.074273005f,  -0.030617684f,  -0.0028467078f,
745                                                    0.024302477f,    -0.0038869337f, 0.005332455f,   0.0013399826f,
746                                                    0.04361412f,     -0.007001822f,  0.09631092f,    -0.06702025f,
747                                                    -0.042049985f,   -0.035070654f,  -0.04103342f,   -0.10273396f,
748                                                    0.0544271f,      0.037184782f,   -0.13150354f,   -0.0058036847f,
749                                                    -0.008264958f,   0.042035464f,   0.05891794f,    0.029673764f,
750                                                    0.0063542654f,   0.044788733f,   0.054816857f,   0.062257513f,
751                                                    -0.00093483756f, 0.048938446f,   -0.004952862f,  -0.007730018f,
752                                                    -0.04043371f,    -0.017094059f,  0.07229206f,    -0.023670016f,
753                                                    -0.052195564f,   -0.025616996f,  -0.01520939f,   0.045104615f,
754                                                    -0.007376126f,   0.003533447f,   0.006570588f,   0.056037236f,
755                                                    0.12436656f,     0.051817212f,   0.028532185f,   -0.08686856f,
756                                                    0.11868599f,     0.07663395f,    -0.07323171f,   0.03463402f,
757                                                    -0.050708205f,   -0.04458982f,   -0.11590894f,   0.021273347f,
758                                                    0.1251325f,      -0.15313013f,   -0.12224372f,   0.17228661f,
759                                                    0.023029093f,    0.086124025f,   0.006445803f,   -0.03496501f,
760                                                    0.028332196f,    0.04449512f,    -0.042436164f,  -0.026587414f,
761                                                    -0.006041347f,   -0.09292539f,   -0.05678812f,   0.03897832f,
762                                                    0.09465633f,     0.008115513f,   -0.02171956f,   0.08304309f,
763                                                    0.071401566f,    0.019622514f,   0.032163795f,   -0.004167056f,
764                                                    0.02295182f,     0.030739572f,   0.056506045f,   0.004612461f,
765                                                    0.06524936f,     0.059999723f,   0.046395954f,   -0.0045512207f,
766                                                    -0.1335546f,     -0.030136576f,  0.11584653f,    -0.014678886f,
767                                                    0.0020118146f,   -0.09688814f,   -0.0790206f,    0.039770417f,
768                                                    -0.0329582f,     0.07922767f,    0.029322514f,   0.026405897f,
769                                                    0.04207835f,     -0.07073373f,   0.063781224f,   0.0859677f,
770                                                    -0.10925287f,    -0.07011058f,   0.048005477f,   0.03438226f,
771                                                    -0.09606514f,    -0.006669445f,  -0.043381985f,  0.04240257f,
772                                                    -0.06955775f,    -0.06769346f,   0.043903265f,   -0.026784198f,
773                                                    -0.017840602f,   0.024307009f,   -0.040079936f,  -0.019946516f,
774                                                    0.045318738f,    -0.12233574f,   0.026170589f,   0.0074471775f,
775                                                    0.15978073f,     0.10185836f,    0.10298046f,    -0.015476589f,
776                                                    -0.039390966f,   -0.072174534f,  0.0739445f,     -0.1211869f,
777                                                    -0.0347889f,     -0.07943156f,   0.014809798f,   -0.12412325f,
778                                                    -0.0030663363f,  0.039695457f,   0.0647603f,     -0.08291318f,
779                                                    -0.018529687f,   -0.004423833f,  0.0037507233f,  0.084633216f,
780                                                    -0.01514876f,    -0.056505352f,  -0.012800942f,  -0.06994386f,
781                                                    0.012962922f,    -0.031234352f,  0.07029052f,    0.016418684f,
782                                                    0.03618972f,     0.055686004f,   -0.08663945f,   -0.017404709f,
783                                                    -0.054761406f,   0.029065743f,   0.052404847f,   0.020238016f,
784                                                    0.0048197987f,   -0.0214882f,    0.07078733f,    0.013016777f,
785                                                    0.06262858f,     0.009184685f,   0.020785125f,   -0.043904778f,
786                                                    -0.0270329f,     -0.03299152f,   -0.060088247f,  -0.015162964f,
787                                                    -0.001828936f,   0.12642565f,    -0.056757294f,  0.013586685f,
788                                                    0.09232601f,     -0.035886683f,  0.06000002f,    0.05229691f,
789                                                    -0.052580316f,   -0.082029596f,  -0.010794592f,  0.012947712f,
790                                                    -0.036429964f,   -0.085508935f,  -0.13127148f,   -0.017744139f,
791                                                    0.031502828f,    0.036232427f,   -0.031581745f,  0.023051167f,
792                                                    -0.05325106f,    -0.03421577f,   0.028793324f,   -0.034633752f,
793                                                    -0.009881397f,   -0.043551125f,  -0.018609839f,  0.0019097115f,
794                                                    -0.008799762f,   0.056595087f,   0.0022273948f,  0.055752404f
795             });
796
797     auto recurrentToOutputWeights =
798             MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
799                                                     -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
800                                                    -0.029587349f, -0.044576716f,  -0.07480124f,  -0.082868785f,
801                                                    0.023254942f,    0.027502948f, -0.0039728214f, -0.08683098f,
802                                                    -0.08116779f,  -0.014675607f,   -0.037924774f, -0.023314456f,
803                                                    -0.007401714f, -0.09255757f,  0.029460307f,    -0.08829125f,
804                                                     -0.005139627f,  -0.08989442f,  -0.0555066f,   0.13596267f,
805                                                    -0.025062224f, -0.048351806f,  -0.03850004f,  0.07266485f,
806                                                    -0.022414139f,   0.05940088f, 0.075114764f,   0.09597592f,
807                                                    -0.010211725f, -0.0049794707f,  -0.011523867f, -0.025980417f,
808                                                    0.072999895f,  0.11091378f,   -0.081685916f,   0.014416728f,
809                                                     0.043229222f,   0.034178585f,  -0.07530371f,  0.035837382f,
810                                                    -0.085607f, -0.007721233f,  -0.03287832f,  -0.043848954f,
811                                                    -0.06404588f,    -0.06632928f, -0.073643476f,  0.008214239f,
812                                                    -0.045984086f, 0.039764922f,    0.03474462f, 0.060612556f,
813                                                    -0.080590084f, 0.049127717f,  0.04151091f,     -0.030063879f,
814                                                     0.008801774f,   -0.023021035f, -0.019558564f, 0.05158114f,
815                                                    -0.010947698f, -0.011825728f,  0.0075720972f, 0.0699727f,
816                                                    -0.0039981045f,  0.069350146f, 0.08799282f,    0.016156472f,
817                                                    0.035502106f,  0.11695009f,     0.006217345f, 0.13392477f,
818                                                    -0.037875112f, 0.025745004f,  0.08940699f,     -0.00924166f,
819                                                     0.0046702605f,  -0.036598757f, -0.08811812f,  0.10522024f,
820                                                    -0.032441203f, 0.008176899f,   -0.04454919f,  0.07058152f,
821                                                    0.0067963637f,   0.039206743f, 0.03259838f,    0.03725492f,
822                                                    -0.09515802f,  0.013326398f,    -0.052055415f, -0.025676316f,
823                                                    0.03198509f,   -0.015951829f, -0.058556724f,   0.036879618f,
824                                                     0.043357447f,   0.028362012f,  -0.05908629f,  0.0059240665f,
825                                                    -0.04995891f, -0.019187413f,0.0276265f, -0.01628143f, 0.0025863599f,
826                                                    0.08800015f, 0.035250366f,   -0.022165963f, -0.07328642f,
827                                                    -0.009415526f,   -0.07455109f, 0.11690406f,    0.0363299f,
828                                                    0.07411125f,   0.042103454f,    -0.009660886f, 0.019076364f,
829                                                    0.018299393f, -0.046004917f, 0.08891175f,0.0431396f, -0.026327137f,
830                                                    -0.051502608f, 0.08979574f,   -0.051670972f,   0.04940282f,
831                                                     -0.07491107f,   -0.021240504f, 0.022596184f,  -0.034280192f,
832                                                    0.060163025f, -0.058211457f,  -0.051837247f, -0.01349775f,
833                                                    -0.04639988f,    -0.035936575f, -0.011681591f,  0.064818054f,
834                                                    0.0073146066f, -0.021745546f,   -0.043124277f, -0.06471268f,
835                                                    -0.07053354f,  -0.029321948f, -0.05330136f,    0.016933719f,
836                                                     -0.053782392f,  0.13747959f,   -0.1361751f,   -0.11569455f,
837                                                    0.0033329215f, 0.05693899f,    -0.053219706f, 0.063698f,
838                                                    0.07977434f,     -0.07924483f, 0.06936997f,    0.0034815092f,
839                                                    -0.007305279f, -0.037325785f,   -0.07251102f, -0.033633437f,
840                                                    -0.08677009f,  0.091591336f,  -0.14165086f,    0.021752775f,
841                                                     0.019683983f,   0.0011612234f, -0.058154266f, 0.049996935f,
842                                                    0.0288841f, -0.0024567875f, -0.14345716f, 0.010955264f,-0.10234828f,
843                                                    0.1183656f, -0.0010731248f, -0.023590032f,-0.072285876f,-0.0724771f,
844                                                    -0.026382286f, -0.0014920527f, 0.042667855f,  0.0018776858f,
845                                                    0.02986552f,     0.009814309f, 0.0733756f,     0.12289186f,
846                                                    0.018043943f,  -0.0458958f,     0.049412545f, 0.033632483f,
847                                                    0.05495232f,   0.036686596f,  -0.013781798f,   -0.010036754f,
848                                                     0.02576849f,    -0.08307328f,  0.010112348f,  0.042521734f,
849                                                    -0.05869831f, -0.071689695f, 0.03876447f, -0.13275425f, -0.0352966f,
850                                                    -0.023077697f, 0.10285965f,    0.084736146f,  0.15568255f,
851                                                    -0.00040734606f, 0.027835453f, -0.10292561f,   -0.032401145f,
852                                                    0.10053256f,   -0.026142767f,   -0.08271222f, -0.0030240538f,
853                                                    -0.016368777f, 0.1070414f,    0.042672627f,    0.013456989f,
854                                                     -0.0437609f,    -0.022309763f, 0.11576483f,   0.04108048f,
855                                                    0.061026827f, -0.0190714f,  -0.0869359f, 0.037901703f,  0.0610107f,
856                                                    0.07202949f, 0.01675338f,    0.086139716f,  -0.08795751f,
857                                                    -0.014898893f,   -0.023771819f, -0.01965048f,   0.007955471f,
858                                                    -0.043740474f, 0.03346837f,     -0.10549954f, 0.090567775f,
859                                                    0.042013682f,  -0.03176985f,  0.12569028f,     -0.02421228f,
860                                                     -0.029526481f,  0.023851605f,  0.031539805f,  0.05292009f,
861                                                    -0.02344001f, -0.07811758f,   -0.08834428f,  0.10094801f,
862                                                    0.16594367f,     -0.06861939f, -0.021256343f,  -0.041093912f,
863                                                    -0.06669611f,  0.035498552f,    0.021757556f, -0.09302526f,
864                                                    -0.015403468f, -0.06614931f,  -0.051798206f,   -0.013874718f,
865                                                     0.03630673f,    0.010412845f,  -0.08077351f,  0.046185967f,
866                                                    0.0035662893f, 0.03541868f,    -0.094149634f, -0.034814864f,
867                                                    0.003128424f,    -0.020674974f, -0.03944324f,   -0.008110165f,
868                                                    -0.11113267f,  0.08484226f,     0.043586485f, 0.040582247f,
869                                                    0.0968012f,    -0.065249965f, -0.028036479f,   0.0050708856f,
870                                                     0.0017462453f,  0.0326779f,    0.041296225f,  0.09164146f,
871                                                    -0.047743853f, -0.015952192f,  -0.034451712f, 0.084197424f,
872                                                    -0.05347844f,    -0.11768019f, 0.085926116f,   -0.08251791f,
873                                                    -0.045081906f, 0.0948852f,      0.068401024f, 0.024856757f,
874                                                    0.06978981f,   -0.057309967f, -0.012775832f,   -0.0032452994f,
875                                                     0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
876             });
877
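        // Peephole (cell-to-gate) weight vectors: one value per cell unit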
878     auto cellToInputWeights =
879             MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f,  0.24704495f,  0.018586371f, -0.037586458f,
880                                                 -0.15312155f, -0.11812848f,  -0.11465643f, 0.20259799f,   0.11418174f,
881                                                 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
882                                                 0.21198851f,  -0.38871562f,  -0.09061183f, -0.09683246f,  -0.21929175f
883             });
884
885
886     auto cellToForgetWeights =
887             MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f,   -0.012770197f, 0.041331276f,
888                                                 -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
889                                                 -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f,   -0.020432774f,
890                                                 0.64658105f,   -0.06650122f,  -0.03467612f,  0.095340036f, 0.23647355f
891             });
892
893     auto cellToOutputWeights =
894             MakeTensor<float, 1>(tensorInfo20, {0.08286371f,  -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
895                                                 -0.5495371f,  -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
896                                                 -0.11940523f, 0.007358328f, 0.1890978f,   0.4833202f,   -0.34441817f,
897                                                 0.36312827f,  -0.26375428f, 0.1457655f,   -0.19724406f, 0.15548733f
898             });
899
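        // Projection layer constants: weights of shape {outputSize, numUnits} and a zero bias of shape {outputSize}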
900     auto projectionWeights =
901             MakeTensor<float, 2>(tensorInfo16x20,
902                                  {-0.009802181f,  0.09401916f,    0.0717386f,     -0.13895074f,  0.09641832f,
903                                   0.060420845f,   0.08539281f,    0.054285463f,   0.061395317f,  0.034448683f,
904                                   -0.042991187f,  0.019801661f,   -0.16840284f,   -0.015726732f, -0.23041931f,
905                                   -0.024478018f,  -0.10959692f,   -0.013875541f,  0.18600968f,   -0.061274476f,
906                                   0.0138165f,     -0.08160894f,   -0.07661644f,   0.032372914f,  0.16169067f,
907                                   0.22465782f,    -0.03993472f,   -0.004017731f,  0.08633481f,   -0.28869787f,
908                                   0.08682067f,    0.17240396f,    0.014975425f,   0.056431185f,  0.031037588f,
909                                   0.16702051f,    0.0077946745f,  0.15140012f,    0.29405436f,   0.120285f,
910                                   -0.188994f,     -0.027265169f,  0.043389652f,   -0.022061434f, 0.014777949f,
911                                   -0.20203483f,   0.094781205f,   0.19100232f,    0.13987629f,   -0.036132768f,
912                                   -0.06426278f,   -0.05108664f,   0.13221376f,    0.009441198f,  -0.16715929f,
913                                   0.15859416f,    -0.040437475f,  0.050779544f,   -0.022187516f, 0.012166504f,
914                                   0.027685808f,   -0.07675938f,   -0.0055694645f, -0.09444123f,  0.0046453946f,
915                                   0.050794356f,   0.10770313f,    -0.20790008f,   -0.07149004f,  -0.11425117f,
916                                   0.008225835f,   -0.035802525f,  0.14374903f,    0.15262283f,   0.048710253f,
917                                   0.1847461f,     -0.007487823f,  0.11000021f,    -0.09542012f,  0.22619456f,
918                                   -0.029149994f,  0.08527916f,    0.009043713f,   0.0042746216f, 0.016261552f,
919                                   0.022461696f,   0.12689082f,    -0.043589946f,  -0.12035478f,  -0.08361797f,
920                                   -0.050666027f,  -0.1248618f,    -0.1275799f,    -0.071875185f, 0.07377272f,
921                                   0.09944291f,    -0.18897448f,   -0.1593054f,    -0.06526116f,  -0.040107165f,
922                                   -0.004618631f,  -0.067624845f,  -0.007576253f,  0.10727444f,   0.041546922f,
923                                   -0.20424393f,   0.06907816f,    0.050412357f,   0.00724631f,   0.039827548f,
924                                   0.12449835f,    0.10747581f,    0.13708383f,    0.09134148f,   -0.12617786f,
925                                   -0.06428341f,   0.09956831f,    0.1208086f,     -0.14676677f,  -0.0727722f,
926                                   0.1126304f,     0.010139365f,   0.015571211f,   -0.038128063f, 0.022913318f,
927                                   -0.042050496f,  0.16842307f,    -0.060597885f,  0.10531834f,   -0.06411776f,
928                                   -0.07451711f,   -0.03410368f,   -0.13393489f,   0.06534304f,   0.003620307f,
929                                   0.04490757f,    0.05970546f,    0.05197996f,    0.02839995f,   0.10434969f,
930                                   -0.013699693f,  -0.028353551f,  -0.07260381f,   0.047201227f,  -0.024575593f,
931                                   -0.036445823f,  0.07155557f,    0.009672501f,   -0.02328883f,  0.009533515f,
932                                   -0.03606021f,   -0.07421458f,   -0.028082801f,  -0.2678904f,   -0.13221288f,
933                                   0.18419984f,    -0.13012612f,   -0.014588381f,  -0.035059117f, -0.04824723f,
934                                   0.07830115f,    -0.056184657f,  0.03277091f,    0.025466874f,  0.14494097f,
935                                   -0.12522776f,   -0.098633975f,  -0.10766018f,   -0.08317623f,  0.08594209f,
936                                   0.07749552f,    0.039474737f,   0.1776665f,     -0.07409566f,  -0.0477268f,
937                                   0.29323658f,    0.10801441f,    0.1154011f,     0.013952499f,  0.10739139f,
938                                   0.10708251f,    -0.051456142f,  0.0074137426f,  -0.10430189f,  0.10034707f,
939                                   0.045594677f,   0.0635285f,     -0.0715442f,    -0.089667566f, -0.10811871f,
940                                   0.00026344223f, 0.08298446f,    -0.009525053f,  0.006585689f,  -0.24567553f,
941                                   -0.09450807f,   0.09648481f,    0.026996298f,   -0.06419476f,  -0.04752702f,
942                                   -0.11063944f,   -0.23441927f,   -0.17608605f,   -0.052156363f, 0.067035615f,
943                                   0.19271925f,    -0.0032889997f, -0.043264326f,  0.09663576f,   -0.057112187f,
944                                   -0.10100678f,   0.0628376f,     0.04447668f,    0.017961001f,  -0.10094388f,
945                                   -0.10190601f,   0.18335468f,    0.10494553f,    -0.052095775f, -0.0026118709f,
946                                   0.10539724f,    -0.04383912f,   -0.042349473f,  0.08438151f,   -0.1947263f,
947                                   0.02251204f,    0.11216432f,    -0.10307853f,   0.17351969f,   -0.039091777f,
948                                   0.08066188f,    -0.00561982f,   0.12633002f,    0.11335965f,   -0.0088127935f,
949                                   -0.019777594f,  0.06864014f,    -0.059751723f,  0.016233567f,  -0.06894641f,
950                                   -0.28651384f,   -0.004228674f,  0.019708522f,   -0.16305895f,  -0.07468996f,
951                                   -0.0855457f,    0.099339016f,   -0.07580735f,   -0.13775392f,  0.08434318f,
952                                   0.08330512f,    -0.12131499f,   0.031935584f,   0.09180414f,   -0.08876437f,
953                                   -0.08049874f,   0.008753825f,   0.03498998f,    0.030215185f,  0.03907079f,
954                                   0.089751154f,   0.029194152f,   -0.03337423f,   -0.019092513f, 0.04331237f,
955                                   0.04299654f,    -0.036394123f,  -0.12915532f,   0.09793732f,   0.07512415f,
956                                   -0.11319543f,   -0.032502122f,  0.15661901f,    0.07671967f,   -0.005491124f,
957                                   -0.19379048f,   -0.218606f,     0.21448623f,    0.017840758f,  0.1416943f,
958                                   -0.07051762f,   0.19488361f,    0.02664691f,    -0.18104725f,  -0.09334311f,
959                                   0.15026465f,    -0.15493552f,   -0.057762887f,  -0.11604192f,  -0.262013f,
960                                   -0.01391798f,   0.012185008f,   0.11156489f,    -0.07483202f,  0.06693364f,
961                                   -0.26151478f,   0.046425626f,   0.036540434f,   -0.16435726f,  0.17338543f,
962                                   -0.21401681f,   -0.11385144f,   -0.08283257f,   -0.069031075f, 0.030635102f,
963                                   0.010969227f,   0.11109743f,    0.010919218f,   0.027526086f,  0.13519906f,
964                                   0.01891392f,    -0.046839405f,  -0.040167913f,  0.017953383f,  -0.09700955f,
965                                   0.0061885654f,  -0.07000971f,   0.026893595f,   -0.038844477f, 0.14543656f
966                                  });
967
968     std::vector<float> projectionBiasVector(outputSize, 0.f);
969     auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
970
971     armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
972     armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
973     armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
974     armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
975     armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
976     armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
977     armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
978     armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
979     armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
980     armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
981     armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
982     armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
983     armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
984     armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
985     armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
986     armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
987     armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
988
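        // Allocate the scoped constant-tensor handles and copy the weight/bias data into them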
989     AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
990     AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
991     AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
992     AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
993     AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
994     AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
995     AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
996     AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
997     AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
998     AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
999     AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1000     AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1001     AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1002     AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1003     AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1004     AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1005     AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1006
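         // Point the queue descriptor at the constant tensors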
1007     data.m_InputToInputWeights = &inputToInputWeightsTensor;
1008     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1009     data.m_InputToCellWeights = &inputToCellWeightsTensor;
1010     data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1011     data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1012     data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1013     data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1014     data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1015     data.m_CellToInputWeights = &cellToInputWeightsTensor;
1016     data.m_InputGateBias = &inputGateBiasTensor;
1017     data.m_ForgetGateBias = &forgetGateBiasTensor;
1018     data.m_CellBias = &cellBiasTensor;
1019     data.m_OutputGateBias = &outputGateBiasTensor;
1020     data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1021     data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1022     data.m_ProjectionWeights = &projectionWeightsTensor;
1023     data.m_ProjectionBias = &projectionBiasTensor;
1024
1025     // Flags to set the test configuration: no CIFG, with peephole and projection
1026     data.m_Parameters.m_ActivationFunc = 4; // 4 = tanh cell activation
1027     data.m_Parameters.m_CifgEnabled = false;
1028     data.m_Parameters.m_PeepholeEnabled = true;
1029     data.m_Parameters.m_ProjectionEnabled = true;
1030
1031
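         // Create the workload, then allocate and populate the input/output tensor handles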
1032     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1033     inputHandle->Allocate();
1034     outputStateInHandle->Allocate();
1035     cellStateInHandle->Allocate();
1036
1037     scratchHandle->Allocate();
1038     outputStateOutHandle->Allocate();
1039     cellStateOutHandle->Allocate();
1040     outputHandle->Allocate();
1041
1042     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1043     CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1044     CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1045
1046     workload->Execute();
1047
1048     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1049
1050     return ret;
1051
1052 }
1053
1054 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1055 LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
1056         armnn::IWorkloadFactory& workloadFactory,
1057         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1058         const boost::multi_array<T, 2>& input,
1059         const boost::multi_array<T, 2>& outputExpected,
1060         float qScale = 0.0f,
1061         int32_t qOffset = 0,
1062         armnn::DataType constantDataType = armnn::DataType::Float32)
1063 {
1064     boost::ignore_unused(memoryManager);
1065     bool cifgEnabled = true;
1066     bool peepholeEnabled = true;
1067     bool projectionEnabled = false;
1068     // Derive the batch, input and output sizes from the supplied test tensors
1069     unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
1070     unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
1071
1072     unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
1073
1074     const unsigned int cellSize = outputSize;
1075
1076     // Decide the shape of all input tensors
1077     armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, ArmnnType, qScale, qOffset);
1078     armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1079     armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
1080
1081     unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4; // CIFG couples the input and forget gates, so 3 gate buffers suffice
1082     armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, ArmnnType, qScale, qOffset);
1083     armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1084     armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
1085     armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1086
1087     // List of inputs
1088     std::vector<float> inputData;
1089     inputData.assign(input.data(), input.data() + batchSize*inputSize);
1090     auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);
1091
1092     std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1093     auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);
1094
1095     std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
1096     auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);
1097
1098
1099     // Prepare all the weights in the descriptor for LSTM
1100     armnn::LstmQueueDescriptor data;
1101     armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, constantDataType, qScale, qOffset);
1102     armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset);
1103     armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset);
1104
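         // With CIFG enabled the input gate is coupled to the forget gate, so no input-gate weights or bias are supplied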
1105     auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
1106                                                      {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
1107                                                      0.04717243f, 0.48944736f, -0.38535351f,
1108                                                      -0.17212132f});
1109     auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
1110                                                      {-0.55291498f, -0.42866567f, 0.13056988f,
1111                                                        -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
1112                                                        0.33826375f});
1113     auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
1114                                                      {0.10725588f, -0.02335852f, -0.55932593f,
1115                                                        -0.09426838f, -0.44257352f, 0.54939759f,
1116                                                        0.01533556f, 0.42751634f});
1117     auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1118     auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
1119     auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1120
1121     auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
1122                 {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
1123                  0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
1124                  0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
1125                  0.21193194f});
1126     auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
1127                  {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
1128                   0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
1129                   -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});
1130
1131     auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
1132                 {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
1133                  -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1134                  0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});
1135
1136     auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1137                 {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
1138     auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1139                 {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
1140
1141     armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
1142     armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
1143     armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
1144
1145     armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
1146     armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
1147     armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
1148
1149     armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
1150     armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
1151     armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
1152
1153
1154     armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
1155     armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
1156
1157     AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1158     AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1159     AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1160
1161     AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1162     AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1163     AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1164
1165     AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1166     AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1167     AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1168
1169     AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1170     AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1171
1172
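         // Wire the (CIFG-reduced) set of constant tensors into the queue descriptor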
1173     data.m_InputToCellWeights = &inputToCellWeightsTensor;
1174     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1175     data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1176
1177     data.m_CellBias = &cellBiasTensor;
1178     data.m_ForgetGateBias = &forgetGateBiasTensor;
1179     data.m_OutputGateBias = &outputGateBiasTensor;
1180
1181     data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1182     data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1183     data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1184
1185     data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1186     data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1187
1188     // Other parameters for the descriptor
1189     data.m_Parameters.m_CifgEnabled = cifgEnabled;
1190     data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
1191     data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
1192
1193     data.m_Parameters.m_ActivationFunc = 4; // 4 = tanh cell activation
1194     data.m_Parameters.m_ClippingThresProj = 0.0; // 0.0 disables projection clipping
1195     data.m_Parameters.m_ClippingThresCell = 0.0; // 0.0 disables cell state clipping
1196
1197
1198     // List of outputs
1199     std::vector<float> scratchBufferVector(batchSize * scratchBufferSize, 0.f);
1200     auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
1201     LayerTestResult<T, 2> ret0(scratchBufferTensorInfo);
1202
1203     // Output state for a certain time step
1204     std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
1205     auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
1206     LayerTestResult<T, 2> ret1(outputStateOutTensorInfo);
1207
1208     // Cell state for a certain time step
1209     std::vector<float> cellStateOutVector(batchSize * cellSize, 0.f);
1210     auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
1211     LayerTestResult<T, 2> ret2(cellStateOutTensorInfo);
1212
1213     // Output for a certain time step
1214     std::vector<float> outputVector(batchSize * outputSize, 0.f);
1215     auto outputTensor = MakeTensor<float, 2>(outputTensorInfo, outputVector);
1216     std::vector<float> outputData;
1217     outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
1218     LayerTestResult<T, 2> ret3(outputTensorInfo);
1219     ret3.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputData);
1220
1221     // Prepare the inputs and outputs for the workload
1222     std::unique_ptr<armnn::ITensorHandle> inputHandle =
1223             workloadFactory.CreateTensorHandle(inputTensorInfo);
1224     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1225             workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
1226     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1227             workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
1228
1229     std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
1230             workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
1231     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
1232             workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
1233     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1234             workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
1235     std::unique_ptr<armnn::ITensorHandle> outputHandle =
1236             workloadFactory.CreateTensorHandle(outputTensorInfo);
1237
1238     armnn::WorkloadInfo info;
1239     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1240     AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1241     AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1242
1243     AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchBufferHandle.get());
1244     AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
1245     AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
1246     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1247
1248     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1249
1250
1251     inputHandle->Allocate();
1252     outputStateInHandle->Allocate();
1253     cellStateInHandle->Allocate();
1254
1255     scratchBufferHandle->Allocate();
1256     outputStateOutHandle->Allocate();
1257     cellStateOutHandle->Allocate();
1258     outputHandle->Allocate();
1259
1260
1261     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1262     CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1263     CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1264
1265     CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
1266     CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
1267     CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);
1268
1269     workload->Execute();
1270
1271     CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
1272     CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
1273     CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
1274     CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());
1275
1276     return ret3;
1277 }
1278
1279 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1280 LayerTestResult<T, 2>
1281 LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
1282                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1283                                                   const boost::multi_array<T, 2>& input,
1284                                                   const boost::multi_array<T, 2>& outputExpected,
1285                                                   float qScale = 0.0f,
1286                                                   int32_t qOffset = 0,
1287                                                   armnn::DataType constantDataType = armnn::DataType::Float32)
1288 {
1289     boost::ignore_unused(memoryManager);
1290     unsigned int batchSize = 2;
1291     unsigned int outputSize = 3;
1292     unsigned int inputSize = 5;
1293     unsigned int numUnits = 4;
1294
1295     armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, ArmnnType, qScale, qOffset);
1296     armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
1297     armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1298
1299     // Scratch buffer size without CIFG [batchSize, numUnits * 4]
1300     armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
1301     armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
1302     armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1303     armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1304
1305     LayerTestResult<T, 2> ret(outputTensorInfo);
1306
1307     std::vector<float> inputVector;
1308     inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
1309     auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
1310
1311     std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1312     auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
1313
1314     std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1315     auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
1316
1317     std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
1318     auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
1319
1320     std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
1321     auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
1322
1323     std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
1324     auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
1325
1326     std::vector<float> outputVector;
1327     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
1328     ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
1329
1330     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1331     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1332             workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
1333     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1334             workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
1335
1336     std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
1337     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
1338             workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
1339     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1340             workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
1341     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1342
1343     armnn::LstmQueueDescriptor data;
1344     armnn::WorkloadInfo info;
1345
1346     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1347     AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1348     AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1349
1350     AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
1351     AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
1352     AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
1353     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1354
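         // Constant tensor shapes: {numUnits} for biases, peephole and layer-norm weights, {numUnits, inputSize} for input weights,
         // {numUnits, outputSize} for recurrent weights and {outputSize, numUnits} for the projection weights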
1355     armnn::TensorInfo tensorInfo3({outputSize}, constantDataType, qScale, qOffset);
1356     armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
1357     armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
1358     armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset);
1359     armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset);
1360
1361     auto inputToInputWeights =
1362             MakeTensor<float, 2>(tensorInfo4x5, { 0.5f,  0.6f,  0.7f, -0.8f, -0.9f,
1363                                                   0.1f,  0.2f,  0.3f, -0.4f,  0.5f,
1364                                                  -0.8f,  0.7f, -0.6f,  0.5f, -0.4f,
1365                                                  -0.5f, -0.4f, -0.3f, -0.2f, -0.1f});  //{numUnits, inputSize}
1366
1367     auto inputToForgetWeights =
1368             MakeTensor<float, 2>(tensorInfo4x5, {-0.6f, -0.1f,  0.3f,  0.2f,  0.9f,
1369                                                  -0.5f, -0.2f, -0.4f,  0.3f, -0.8f,
1370                                                  -0.4f,  0.3f, -0.5f, -0.4f, -0.6f,
1371                                                   0.3f, -0.4f, -0.6f, -0.5f, -0.5f});  //{numUnits, inputSize}
1372
1373     auto inputToCellWeights =
1374             MakeTensor<float, 2>(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
1375                                                   0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
1376                                                   0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
1377                                                   0.7f, -0.9f, -0.5f,  0.8f,  0.6f});  //{numUnits, inputSize}
1378
1379     auto inputToOutputWeights =
1380             MakeTensor<float, 2>(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
1381                                                  -0.7f,  0.3f, -0.3f, -0.8f, -0.2f,
1382                                                   0.6f, -0.2f,  0.4f, -0.7f, -0.3f,
1383                                                  -0.5f,  0.1f,  0.5f, -0.6f, -0.4f}); //{numUnits, inputSize}
1384
1385     auto inputGateBias =
1386             MakeTensor<float, 1>(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f});  //{numUnits}
1387
1388     auto forgetGateBias =
1389             MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f});    //{numUnits}
1390
1391     auto cellBias =
1392             MakeTensor<float, 1>(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits}
1393
1394     auto outputGateBias =
1395             MakeTensor<float, 1>(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f});   //{numUnits}
1396
1397     auto recurrentToInputWeights =
1398             MakeTensor<float, 2>(tensorInfo4x3, {-0.2f, -0.3f,  0.4f,
1399                                                   0.1f, -0.5f,  0.9f,
1400                                                  -0.2f, -0.3f, -0.7f,
1401                                                  0.05f, -0.2f, -0.6f});  //{numUnits, outputSize}
1402
1403     auto recurrentToCellWeights =
1404             MakeTensor<float, 2>(tensorInfo4x3, {-0.3f,  0.2f,   0.1f,
1405                                                  -0.3f,  0.8f, -0.08f,
1406                                                  -0.2f,  0.3f,   0.8f,
1407                                                  -0.6f, -0.1f,   0.2f}); //{numUnits, outputSize}
1408
1409     auto recurrentToForgetWeights =
1410             MakeTensor<float, 2>(tensorInfo4x3, {-0.5f, -0.3f, -0.5f,
1411                                                  -0.2f,  0.6f,  0.4f,
1412                                                   0.9f,  0.3f, -0.1f,
1413                                                   0.2f,  0.5f,  0.2f});  //{numUnits, outputSize}
1414
1415     auto recurrentToOutputWeights =
1416             MakeTensor<float, 2>(tensorInfo4x3, { 0.3f, -0.1f,  0.1f,
1417                                                  -0.2f, -0.5f, -0.7f,
1418                                                  -0.2f, -0.6f, -0.1f,
1419                                                  -0.4f, -0.7f, -0.2f});  //{numUnits, outputSize}
1420
1421     auto cellToInputWeights =
1422             MakeTensor<float, 1>(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f});      //{numUnits}
1423
1424     auto cellToForgetWeights =
1425             MakeTensor<float, 1>(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits}
1426
1427     auto cellToOutputWeights =
1428             MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f});      //{numUnits}
1429
1430     auto projectionWeights =
1431             MakeTensor<float, 2>(tensorInfo3x4,
1432                                  {-0.1f, 0.2f, 0.01f, -0.2f,
1433                                    0.1f, 0.5f,  0.3f, 0.08f,
1434                                   0.07f, 0.2f, -0.4f,  0.2f}); //{outputSize, numUnits}
1435
1436     std::vector<float> projectionBiasVector(outputSize, 0.f);
1437     auto projectionBias = MakeTensor<float,1>(tensorInfo3, projectionBiasVector); //{outputSize}
1438
1439     auto inputLayerNormWeights =
1440             MakeTensor<float, 1>(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits}
1441
1442     auto forgetLayerNormWeights =
1443             MakeTensor<float, 1>(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits}
1444
1445     auto cellLayerNormWeights =
1446             MakeTensor<float, 1>(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits}
1447
1448     auto outputLayerNormWeights =
1449             MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
1450
1451
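         // Scoped handles for every constant tensor, including the four layer-normalisation weight vectors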
1452     armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
1453     armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
1454     armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
1455     armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
1456     armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
1457     armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
1458     armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
1459     armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
1460     armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
1461     armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
1462     armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
1463     armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
1464     armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
1465     armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
1466     armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
1467     armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
1468     armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
1469
1470     armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
1471     armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
1472     armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
1473     armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
1474
1475     AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
1476     AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1477     AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1478     AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1479     AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
1480     AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1481     AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1482     AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1483     AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
1484     AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
1485     AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1486     AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1487     AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1488     AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1489     AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1490     AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1491     AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1492
1493     AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
1494     AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
1495     AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
1496     AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
1497
1498     data.m_InputToInputWeights = &inputToInputWeightsTensor;
1499     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1500     data.m_InputToCellWeights = &inputToCellWeightsTensor;
1501     data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1502     data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1503     data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1504     data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1505     data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1506     data.m_CellToInputWeights = &cellToInputWeightsTensor;
1507     data.m_InputGateBias = &inputGateBiasTensor;
1508     data.m_ForgetGateBias = &forgetGateBiasTensor;
1509     data.m_CellBias = &cellBiasTensor;
1510     data.m_OutputGateBias = &outputGateBiasTensor;
1511     data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1512     data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1513     data.m_ProjectionWeights = &projectionWeightsTensor;
1514     data.m_ProjectionBias = &projectionBiasTensor;
1515
1516     data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
1517     data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
1518     data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
1519     data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
1520
1521     // Flags to set test configuration
1522     data.m_Parameters.m_ActivationFunc = 4;      // 4 selects tanh in the LSTM activation-function encoding
1523     data.m_Parameters.m_CifgEnabled = false;     // keep the full input gate (CIFG disabled)
1524     data.m_Parameters.m_PeepholeEnabled = true;
1525     data.m_Parameters.m_ProjectionEnabled = true;
1526     data.m_Parameters.m_LayerNormEnabled = true;
1527
1528
1529     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1530     inputHandle->Allocate();
1531     outputStateInHandle->Allocate();
1532     cellStateInHandle->Allocate();
1533
1534     scratchHandle->Allocate();
1535     outputStateOutHandle->Allocate();
1536     cellStateOutHandle->Allocate();
1537     outputHandle->Allocate();
1538
1539     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1540     CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1541     CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1542
1543     workload->Execute();
1544
1545     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1546
1547     return ret;
1548 }
1549
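// QuantizedLstmTestImpl builds and runs a single-step quantized LSTM workload: input, output
// and output state are QuantisedAsymm8, the cell state is QuantisedSymm16 and the biases are
// Signed32. The scales and offsets are fixed inside this helper, so callers only supply raw
// quantised input values and the expected raw quantised output.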
1550 LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
1551     armnn::IWorkloadFactory& workloadFactory,
1552     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1553     const boost::multi_array<uint8_t, 2>& input,
1554     const boost::multi_array<uint8_t, 2>& outputExpected)
1555 {
1556     boost::ignore_unused(memoryManager);
1557     auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
1558     auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
1559     auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
1560
1561     // Scale/Offset for input/output, cellState In/Out, weights, bias
1562     float inputOutputScale = 0.0078125f;
1563     int32_t inputOutputOffset = 128;
1564
1565     float cellStateScale = 0.00048828125f;
1566     int32_t cellStateOffset = 0;
1567
1568     float weightsScale = 0.00408021f;
1569     int32_t weightsOffset = 100;
1570
1571     float biasScale = 3.1876640625e-05f;
1572     int32_t biasOffset = 0;
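    // biasScale is the product of the input scale and the weights scale
    // (0.0078125f * 0.00408021f = 3.1876640625e-05f), as required for the Signed32 bias tensors.
    // The cell state scale of 1/2048 (2^-11) matches the 16-bit symmetric cell-state format
    // used by the Android NN QUANTIZED_16BIT_LSTM operation.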
1573
1574     // Input/Output tensor info
1575     armnn::TensorInfo inputInfo({numBatches , inputSize},
1576                                  armnn::DataType::QuantisedAsymm8,
1577                                  inputOutputScale,
1578                                  inputOutputOffset);
1579
1580     armnn::TensorInfo cellStateInfo({numBatches , outputSize},
1581                                      armnn::DataType::QuantisedSymm16,
1582                                      cellStateScale,
1583                                      cellStateOffset);
1584
1585     armnn::TensorInfo outputStateInfo({numBatches , outputSize},
1586                                        armnn::DataType::QuantisedAsymm8,
1587                                        inputOutputScale,
1588                                        inputOutputOffset);
1589
1590     LayerTestResult<uint8_t, 2> ret(outputStateInfo);
1591
1592     // Input0
1593     std::vector<uint8_t> inputVector;
1594     inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
1595     auto inputTensor = MakeTensor<uint8_t, 2>(inputInfo, inputVector);
1596
1597     // Input1
1598     std::vector<int16_t> cellStateInVector   = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13
1599     auto cellStateInTensor   = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
1600
1601     // Input2
1602     std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14
1603     auto outputStateInTensor = MakeTensor<uint8_t, 2>(outputStateInfo, outputStateInVector);
1604
1605     // Output0
1606     std::vector<int16_t> cellStateOutVector  = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0
1607     auto cellStateOutTensor  = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
1608
1609     // Output1
1610     std::vector<uint8_t> outputVector; // 1
1611     outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
1612     ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);
1613
1614     // Create tensor handles
1615     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
1616     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1617             workloadFactory.CreateTensorHandle(cellStateInfo);
1618     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1619             workloadFactory.CreateTensorHandle(outputStateInfo);
1620
1621     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1622             workloadFactory.CreateTensorHandle(cellStateInfo);
1623     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
1624
1625     armnn::QuantizedLstmQueueDescriptor data;
1626     armnn::WorkloadInfo info;
1627
1628     // Add inputs and outputs to workload
1629     AddInputToWorkload(data, info, inputInfo, inputHandle.get());
1630     AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());
1631     AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());
1632
1633     AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
1634     AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());
1635
1636     // Weights and bias tensor and quantization info
1637     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
1638                                         armnn::DataType::QuantisedAsymm8,
1639                                         weightsScale,
1640                                         weightsOffset);
1641
1642     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
1643                                             armnn::DataType::QuantisedAsymm8,
1644                                             weightsScale,
1645                                             weightsOffset);
1646
1647     armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
1648
1649     // Weights and bias tensor data
1650     auto inputToInputWeights  = MakeTensor<uint8_t, 2>(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108});
1651     auto inputToForgetWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169});
1652     auto inputToCellWeights   = MakeTensor<uint8_t, 2>(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183});
1653     auto inputToOutputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48});
1654
1655     auto recurrentToInputWeights  = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1656             {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26});
1657     auto recurrentToForgetWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1658             {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253});
1659     auto recurrentToCellWeights   = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1660             {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216});
1661     auto recurrentToOutputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1662             {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98});
1663
1664     auto inputGateBias  = MakeTensor<int32_t, 1>(biasInfo, {-7876, 13488, -726, 32839});
1665     auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {9206, -46884, -11693, -38724});
1666     auto cellBias       = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
1667     auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
1668
1669     // ScopedCpuTensorHandles
1670     armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
1671     armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
1672     armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
1673     armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
1674
1675     armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
1676     armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
1677     armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
1678     armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
1679
1680     armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
1681     armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
1682     armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
1683     armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
1684
1685     // Allocate and copy data
1686     AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
1687     AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1688     AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1689     AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1690
1691     AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
1692     AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1693     AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1694     AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1695
1696     AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
1697     AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1698     AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1699     AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1700
1701     // Setup queue descriptor
1702     data.m_InputToInputWeights = &inputToInputWeightsTensor;
1703     data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1704     data.m_InputToCellWeights = &inputToCellWeightsTensor;
1705     data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1706
1707     data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1708     data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1709     data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1710     data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1711
1712     data.m_InputGateBias = &inputGateBiasTensor;
1713     data.m_ForgetGateBias = &forgetGateBiasTensor;
1714     data.m_CellBias = &cellBiasTensor;
1715     data.m_OutputGateBias = &outputGateBiasTensor;
1716
1717     // Create workload and allocate tensor handles
1718     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
1719     inputHandle->Allocate();
1720     outputStateInHandle->Allocate();
1721     cellStateInHandle->Allocate();
1722
1723     cellStateOutHandle->Allocate();
1724     outputHandle->Allocate();
1725
1726     CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1727     CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1728     CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1729
1730     workload->Execute();
1731
1732     CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1733
1734     return ret;
1735 }
1736
1737 } // anonymous namespace
1738
1739 #if defined(ARMNNREF_ENABLED)
1740
1741 // The LstmUtils unit tests are only run for the reference backend at the moment
1742
1743 void LstmUtilsZeroVectorTest()
1744 {
1745     armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1746     boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1747             {2., 3., 3., 4.}));
1748
1749     boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1750             {0., 0., 0., 0.}));
1751
1752     return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1753 }
1754
1755 void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
1756 {
1757     uint32_t batchSize = 2;
1758     uint32_t vecSize = 4;
1759     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1760     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1761             { 0.1f, 0.2f, 0.3f, 0.4f,      //batch 0
1762               0.9f, 1.0f, 1.1f, 1.2f }));  //batch 1
1763
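    // MeanStddevNormalization normalises each batch row to zero mean and unit variance:
    // for batch 0 the mean is 0.25 and the stddev is sqrt(0.0125) ~= 0.1118, so the first
    // element becomes (0.1 - 0.25) / 0.1118 ~= -1.3416.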
1764     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1765             { -1.34164071f, -0.447213531f, 0.44721365f,  1.34164071f,      //batch 0
1766               -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f  }));  //batch 1
1767
1768     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1769             vecSize, batchSize, expectedOutput);
1770 }
1771
1772 void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1773 {
1774     uint32_t batchSize = 2;
1775     uint32_t vecSize = 4;
1776     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1777     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1778             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
1779               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
1780
1781     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1782             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
1783               0.0f, 0.0f, 0.0f, 0.0f }));  //batch 1
1784
1785     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1786             vecSize, batchSize, expectedOutput);
1787 }
1788
1789 void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
1790 {
1791     uint32_t batchSize = 2;
1792     uint32_t vecSize = 4;
1793     armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1794     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1795             { 0.0f, 0.0f, 0.0f, 0.0f,      //batch 0
1796               0.1f, 0.2f, 0.3f, 0.4f }));  //batch 1
1797
1798     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1799             {         0.0f,          0.0f,        0.0f,        0.0f,      //batch 0
1800               -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f }));  //batch 1
1801
1802     return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1803             vecSize, batchSize, expectedOutput);
1804 }
1805
1806 void LstmUtilsVectorBatchVectorCwiseProductTest()
1807 {
1808     uint32_t batchSize = 4;
1809     uint32_t vecSize = 29;
1810     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1811     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1812             {   1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
1813               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
1814               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,     0.0f}));
1815
1816     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1817     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1818             { /* batch 0 */
1819                 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
1820               11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f,  20.2f,
1821               21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,   0.0f,
1822               /* batch 1 */
1823                 -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f, -10.1f,
1824               -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
1825               -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,    0.0f,
1826               /* batch 2 */
1827                 1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f, -10.1f,
1828               11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
1829               21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,   0.0f,
1830               /* batch 3 */
1831                 -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f, 10.1f,
1832               -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
1833               -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,    0.0f}));
1834
1835     // Expected output is the element-wise product of the vector and each batch row, e.g. 1.1f * 1.1f = 1.21f.
1836     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1837             { /* batch 0 */
1838                  1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
1839                 59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
1840                172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
1841                368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
1842                637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
1843               /* batch 1 */
1844                 -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
1845                -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
1846               -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
1847               -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
1848               -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
1849               /* batch 2 */
1850                  1.210000f,   -4.840000f,  10.889999f,   -19.360001f,   30.250000f,  -43.559998f,
1851                 59.289997f,  -77.440002f,  98.009995f,  -102.010010f,  123.432091f, -146.894394f,
1852                172.396896f, -199.939606f, 229.522491f,  -261.145599f,  294.808899f, -330.512421f,
1853                368.256134f, -408.040039f, 449.864075f,  -493.728363f,  539.632874f, -587.577576f,
1854                637.562500f, -689.587585f, 743.652954f,  -799.758423f,    0.000000f,
1855               /* batch 3 */
1856                 -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
1857                -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
1858               -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
1859               -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
1860               -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));
1861
1862     return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
1863             vecSize, batchSize, expectedOutput);
1864 }
1865
1866 void LstmUtilsVectorBatchVectorAddTest()
1867 {
1868     uint32_t batchSize = 2;
1869     uint32_t vecSize = 3;
1870     armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1871     boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1872             { 0.0f, -0.5f, 1.0f}));
1873
1874     armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1875     boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1876             { 1.0f, 2.0f, 3.0f,    //batch 0
1877               4.0f, 5.0f, 6.0f})); //batch 1
1878
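    // VectorBatchVectorAdd adds the vector to every batch row:
    // batch 0: {1, 2, 3} + {0, -0.5, 1} = {1, 1.5, 4}; batch 1: {4, 5, 6} + {0, -0.5, 1} = {4, 4.5, 7}.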
1879     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1880             { 1.0f, 1.5f, 4.0f,
1881               4.0f, 4.5f, 7.0f}));
1882
1883     return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1884             vecSize, batchSize, expectedOutput);
1885 }
1886
1887 #endif
1888
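// With CIFG (coupled input and forget gate) the input gate has no weights of its own and is
// derived from the forget gate, while the peephole connections feed the cell state into the
// gate calculations.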
1889 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
1890     armnn::IWorkloadFactory& workloadFactory,
1891     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1892 {
1893     armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
1894     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1895             { 2., 3., 3., 4. }));
1896
1897     armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
1898     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1899             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1900              -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
1901     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1902         workloadFactory, memoryManager, input, expectedOutput);
1903 }
1904
1905 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
1906     armnn::IWorkloadFactory& workloadFactory,
1907     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1908 {
1909     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1910     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1911             {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1912              0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
1913
1914     armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
1915     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1916             {-0.00396806f, 0.029352f,     -0.00279226f, 0.0159977f,   -0.00835576f,
1917              -0.0211779f,  0.0283512f,    -0.0114597f,  0.00907307f,  -0.0244004f,
1918              -0.0152191f,  -0.0259063f,   0.00914318f,  0.00415118f,  0.017147f,
1919              0.0134203f, -0.013869f,    0.0287268f,   -0.00334693f, 0.00733398f,  -0.0287926f,
1920              -0.0186926f,   0.0193662f,   -0.0115437f,  0.00422612f,  -0.0345232f,
1921              0.00223253f,   -0.00957321f, 0.0210624f,   0.013331f,    0.0150954f,
1922              0.02168f}));
1923     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
1924         workloadFactory, memoryManager, input, expectedOutput);
1925 }
1926
1927 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1928     armnn::IWorkloadFactory& workloadFactory,
1929     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1930 {
1931     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
1932     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1933             {2., 3., 3., 4.}));
1934
1935     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
1936     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1937             {-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
1938              -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f}));
1939
1940     return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1941         workloadFactory, memoryManager, input, expectedOutput);
1942 }
1943
1944 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1945         armnn::IWorkloadFactory& workloadFactory,
1946         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1947 {
1948     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1949     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1950             {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
1951              0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1
1952
1953     armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
1954     boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1955             {  0.0244077f,  0.128027f, -0.00170918f,    //batch 0
1956              -0.00692428f, 0.0848741f,    0.063445f})); //batch 1
1957     return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
1958             workloadFactory, memoryManager, input, expectedOutput);
1959 }
1960
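// The Int16 variants quantise the same inputs and expected outputs to QSymm16 using the given
// qScale/qOffset and reuse the float test implementations; the trailing constantDatatype
// argument selects the data type used for the constant (weight and bias) tensors.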
1961 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1962     armnn::IWorkloadFactory& workloadFactory,
1963     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1964 {
1965     const float qScale = 1.0f;
1966     const int32_t qOffset = 0;
1967
1968     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1969     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1970
1971     armnn::TensorInfo inputDesc({2, 2}, datatype);
1972     boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
1973         inputDesc,
1974         armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
1975
1976     armnn::TensorInfo outputDesc({2, 4}, datatype);
1977     boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
1978         outputDesc,
1979         armnnUtils::QuantizedVector<int16_t>(
1980             {
1981                 -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
1982                 -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
1983             },
1984             qScale, qOffset));
1985
1986     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1987         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1988
1989 }
1990
1991 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1992     armnn::IWorkloadFactory& workloadFactory,
1993     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1994 {
1995     const float qScale = 1.0f;
1996     const int32_t qOffset = 0;
1997
1998     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1999     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2000
2001     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2002     boost::multi_array<int16_t, 2> input =
2003         MakeTensor<int16_t, 2>(
2004             inputDesc,
2005             armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
2006
2007     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2008     boost::multi_array<int16_t, 2> expectedOutput =
2009         MakeTensor<int16_t, 2>(
2010             outputDesc,
2011             armnnUtils::QuantizedVector<int16_t>(
2012                 {
2013                     -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2014                     -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
2015                 },
2016                 qScale, qOffset));
2017
2018     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2019         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2020 }
2021
2022 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2023     armnn::IWorkloadFactory& workloadFactory,
2024     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2025 {
2026     const float qScale = 2.0f;
2027     const int32_t qOffset = 0;
2028
2029     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2030     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2031
2032     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2033     boost::multi_array<int16_t, 2> input =
2034         MakeTensor<int16_t, 2>(
2035             inputDesc,
2036             armnnUtils::QuantizedVector<int16_t>(
2037                 {
2038                     0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2039                     0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
2040                 },
2041                 qScale, qOffset));
2042
2043     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2044     boost::multi_array<int16_t, 2> expectedOutput =
2045         MakeTensor<int16_t, 2>(
2046             outputDesc,
2047             armnnUtils::QuantizedVector<int16_t>(
2048                 {
2049                     -0.00396806f,  0.02935200f, -0.00279226f,  0.01599770f,
2050                     -0.00835576f, -0.02117790f,  0.02835120f, -0.01145970f,
2051                      0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
2052                      0.00914318f,  0.00415118f,  0.01714700f,  0.01342030f,
2053                     -0.01386900f,  0.02872680f, -0.00334693f,  0.00733398f,
2054                     -0.02879260f, -0.01869260f,  0.01936620f, -0.01154370f,
2055                      0.00422612f, -0.03452320f,  0.00223253f, -0.00957321f,
2056                      0.02106240f,  0.01333100f,  0.01509540f,  0.02168000f
2057                 },
2058                 qScale, qOffset));
2059
2060     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2061         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2062 }
2063
2064 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2065     armnn::IWorkloadFactory& workloadFactory,
2066     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2067 {
2068     const float qScale = 1.0f;
2069     const int32_t qOffset = 0;
2070
2071     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2072
2073     armnn::TensorInfo inputDesc({2, 2}, datatype);
2074     boost::multi_array<int16_t , 2> input =
2075         MakeTensor<int16_t , 2>(inputDesc,
2076                                 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
2077
2078     armnn::TensorInfo outputDesc({2, 4}, datatype);
2079     boost::multi_array<int16_t, 2> expectedOutput =
2080         MakeTensor<int16_t, 2>(
2081             outputDesc,
2082             armnnUtils::QuantizedVector<int16_t>(
2083                 {
2084                     -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
2085                     -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
2086                 },
2087                 qScale, qOffset));
2088
2089     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2090         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2091 }
2092
2093 //
2094 // QuantizedLstm
2095 //
2096
2097 LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2098     armnn::IWorkloadFactory& workloadFactory,
2099     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2100 {
2101     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2102     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2103         {166, 179, 50, 150}));
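    // The raw uint8 values are interpreted by QuantizedLstmTestImpl with scale 1/128 and
    // zero point 128, so e.g. 166 dequantises to (166 - 128) * 0.0078125f = 0.296875f.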
2104
2105     armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2106     boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2107         {140, 151, 146, 112, 136, 156, 142, 112 }));
2108
2109     return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2110 }