IVGCVSW-3722 Add front end support for ArgMinMax
[platform/upstream/armnn.git] / src / backends / backendsCommon / WorkloadData.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "WorkloadData.hpp"
6
7 #include "CpuTensorHandle.hpp"
8
9 #include <DataLayoutIndexed.hpp>
10
11 #include <algorithm>
12 #include <iomanip>
13 #include <string>
14 #include <sstream>
15
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
18
19 using namespace armnnUtils;
20
21 namespace armnn
22 {
23
24 //---------------------------------------------------------------
25 DataType GetBiasDataType(DataType inputDataType)
26 {
27     switch (inputDataType)
28     {
29         case DataType::Float16:
30             return DataType::Float16;
31         case DataType::Float32:
32             return DataType::Float32;
33         case DataType::QuantisedAsymm8:
34             return DataType::Signed32;
35         case DataType::QuantisedSymm16:
36             return DataType::Signed32;
37         default:
38             BOOST_ASSERT_MSG(false, "Invalid input data type");
39             return DataType::Float32;
40     }
41 }
42
43 namespace
44 {
45
46 //---------------------------------------------------------------
47 //android ndk does not support std::to_string function.
// Portable replacement for std::to_string (the android ndk lacks it);
// formats any streamable value via an output string stream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
55
56 //---------------------------------------------------------------
57 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
58 {
59     if (!ptr)
60     {
61         throw InvalidArgumentException(descName +  ": Invalid null pointer. The " +
62                                       paramName + " parameter must be set.");
63     }
64 }
65
66 //---------------------------------------------------------------
67 void ValidateTensorShapesMatch(const TensorInfo& first,
68                                const TensorInfo& second,
69                                std::string const& descName,
70                                std::string const& firstName,
71                                std::string const& secondName)
72 {
73     if (first.GetShape() != second.GetShape())
74     {
75         throw InvalidArgumentException(descName + ": "
76                                        + firstName + " & " + secondName + " must have identical shapes");
77     }
78 }
79
80 //---------------------------------------------------------------
81 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
82 {
83     if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
84     {
85         throw InvalidArgumentException(descName +
86                                        ": Requires exactly " + to_string(expectedSize) + "input(s). " +
87                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
88     }
89 }
90
91 //---------------------------------------------------------------
92 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
93 {
94     if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
95     {
96         throw InvalidArgumentException(descName +
97                                        ": Requires exactly " + to_string(expectedSize) + " output(s). " +
98                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
99     }
100 }
101
102 //---------------------------------------------------------------
103 void ValidateTensorNumDimensions(const TensorInfo& tensor,
104                                  std::string const& descName,
105                                  unsigned int numDimensions,
106                                  std::string const& tensorName)
107 {
108     if (tensor.GetNumDimensions() != numDimensions)
109     {
110         throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111             to_string(tensor.GetNumDimensions()) + " dimensions for " +
112             tensorName + " tensor.");
113     }
114 }
115
116 //---------------------------------------------------------------
117 void ValidateTensorNumElements(const TensorInfo& tensor,
118                                std::string const& descName,
119                                unsigned int numElements,
120                                std::string const& tensorName)
121 {
122     if (tensor.GetNumElements() != numElements)
123     {
124         throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
125                                        to_string(tensor.GetNumElements()) + " elements for " +
126                                        tensorName + " tensor.");
127     }
128 }
129
130 //---------------------------------------------------------------
131 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
132                                  unsigned int numDimension,
133                                  unsigned int numElements,
134                                  std::string const& tensorName)
135 {
136     const std::string functionName{"ValidateTensorNumDimNumElem"};
137     ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
138     ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
139 }
140
141 //---------------------------------------------------------------
142 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
143     const std::string& descName, std::string const& tensorName)
144 {
145     if (tensor.GetDataType() != dataType)
146     {
147         throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
148             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
149     }
150 }
151
152 //---------------------------------------------------------------
// Checks that two tensors live in the same quantization "space": same
// quantized data type and matching scale/offset. Tensors that are not
// quantized are exempt from this check.
void ValidateTensorQuantizationSpace(const TensorInfo& first,
                                     const TensorInfo& second,
                                     const std::string& descName,
                                     std::string const& firstName,
                                     std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    // IsTypeSpaceMatch compares the quantization scale and offset of the two
    // infos; a mismatch means values are not directly comparable.
    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
                                       " and scale " + to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
                                       " and scale " + to_string(second.GetQuantizationScale()));
    }
}
187
188 //---------------------------------------------------------------
// Validates that a bias tensor's quantization parameters are compatible with
// its input and weights: the offset must be zero and the scale must equal
// inputScale * weightsScale (within a small tolerance).
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    // Tolerate tiny float rounding differences in the product of the scales.
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
    {
        // Print the float values with extra precision to see very small differences
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}
210
211 //---------------------------------------------------------------
212 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
213     unsigned int numExpected,
214     const std::string& descName,
215     const std::string& varName)
216 {
217     if (vec.empty() && numExpected > 0)
218     {
219         throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
220     }
221
222     for (unsigned int i = 0; i < numExpected; ++i)
223     {
224         if (!vec[i])
225         {
226             throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
227         }
228     }
229 }
230
231 //---------------------------------------------------------------
// Validates that 'first' and 'second' can be broadcast together and that the
// resulting broadcast shape equals 'output'. Both inputs must have the same
// rank; in every dimension the sizes must be equal or one of them must be 1.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName  + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast size of each dimension is the larger of the two sizes
        // (the size-1 dimension is stretched to match the other).
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
267
268 //---------------------------------------------------------------
269 void ValidateDataTypes(const TensorInfo& info,
270                        const std::vector<armnn::DataType>& supportedTypes,
271                        std::string const& descName)
272 {
273     auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
274     if (iterator == supportedTypes.end())
275     {
276         throw InvalidArgumentException(descName  + ": " + " Tensor type is not supported.");
277     }
278 }
279
280 //---------------------------------------------------------------
281 void ValidateTensorDataTypesMatch(const TensorInfo& first,
282                                   const TensorInfo& second,
283                                   std::string const& descName,
284                                   std::string const& firstName,
285                                   std::string const& secondName)
286 {
287     if (first.GetDataType() != second.GetDataType())
288     {
289         throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
290                                        " must have identical data types.");
291     }
292 }
293
294 //---------------------------------------------------------------
295 void ValidateTensorNumElementsMatch(const TensorInfo& first,
296                                     const TensorInfo& second,
297                                     std::string const& descName,
298                                     std::string const& firstName,
299                                     std::string const& secondName)
300 {
301     if (first.GetNumElements() != second.GetNumElements())
302     {
303         throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
304                                        " must have the same number of elements.");
305     }
306 }
307
308 } // anonymous namespace
309
// Checks that m_Inputs/m_Outputs hold at least the expected number of
// non-null tensor handles. Inputs are validated before outputs, so an input
// problem is reported first when both are wrong.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
316
317 //---------------------------------------------------------------
// Validates a MemCopy workload: exactly one input/output tensor-info pair
// with matching element counts and data types, plus pairwise non-null
// input/output handles.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo,  descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName , 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // A copy only requires matching element count and type, not matching shape.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
                          descriptorName % m_Inputs.size() % m_Outputs.size()));
    }

    // Every input handle must be non-null and have a non-null counterpart
    // output handle at the same index.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
                                                      descriptorName % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
                                                      descriptorName % i));
        }
    }
}
353
354 //---------------------------------------------------------------
// Validates a MemImport workload: exactly one input/output pair, matching
// element counts, and non-null handles. Some checks overlap with
// ValidateNumInputs/Outputs deliberately, to give distinct error messages.
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);

    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) is not 1.")
            % workloadInfo.m_InputTensorInfos.size()));

    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
            % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Importing only requires matching element counts, not matching shapes.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                % i ));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) is not 1.")
            % m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
            % m_Inputs.size() % m_Outputs.size()));
    }

    // All tensor handles must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
413
414 //---------------------------------------------------------------
415 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
416 {
417     ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
418     ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
419
420     if (m_Inputs.size() != 1)
421     {
422         throw InvalidArgumentException(boost::str(
423             boost::format("Number of inputs (%1%) is not 1.")
424             % m_Inputs.size()));
425     }
426
427     if (m_Outputs.size() != 0)
428     {
429         throw InvalidArgumentException(boost::str(
430             boost::format("Number of outputs (%1%) is not 0.")
431             % m_Inputs.size() % m_Outputs.size()));
432     }
433
434     if (!m_Inputs[0])
435     {
436         throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
437     }
438 }
439
440 //---------------------------------------------------------------
441 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
442 {
443     const std::string descriptorName{"ActivationQueueDescriptor"};
444
445     ValidateNumInputs(workloadInfo,  descriptorName, 1);
446     ValidateNumOutputs(workloadInfo, descriptorName, 1);
447
448     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
449     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
450
451     std::vector<DataType> supportedTypes =
452     {
453             DataType::Float16,
454             DataType::Float32,
455             DataType::QuantisedAsymm8,
456             DataType::QuantisedSymm16
457     };
458
459     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
460     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
461     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
462 }
463
464 void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
465 {
466     const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
467
468     ValidateNumInputs(workloadInfo,  descriptorName, 1);
469     ValidateNumOutputs(workloadInfo, descriptorName, 1);
470
471     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
472     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
473
474     std::vector<DataType> supportedTypes =
475             {
476                     DataType::Float16,
477                     DataType::Float32,
478                     DataType::QuantisedAsymm8,
479                     DataType::QuantisedSymm16
480             };
481
482     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
483     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
484     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
485 }
486
487 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
488 {
489     const std::string descriptorName{"SoftmaxQueueDescriptor"};
490
491     ValidateNumInputs(workloadInfo,  descriptorName, 1);
492     ValidateNumOutputs(workloadInfo, descriptorName, 1);
493
494     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
495     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
496
497     std::vector<DataType> supportedTypes =
498     {
499             DataType::Float16,
500             DataType::Float32,
501             DataType::QuantisedAsymm8,
502             DataType::QuantisedSymm16
503     };
504
505     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
506     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
507     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
508 }
509
// Validates a Splitter workload: one input split into one or more output
// views. Each view origin must have the same dimensionality as the input,
// and every view (origin + output extent) must fit inside the input tensor.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    // Every output must be a supported type and match the input's data type.
    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    // One split window (view origin) is required per output tensor info.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each view's origin plus its output extent must stay within the
        // input tensor's bounds in every dimension.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
579
// Validates a Concat workload: one or more inputs merged into a single
// output. When the concat axis is not the innermost dimension, per-input
// view origins are checked for dimensionality and bounds against the output.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // Concatenating along the innermost dimension skips the view-origin
    // checks below, so validation ends early in that case.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // One merge window (view origin) is required per input tensor info.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    // Every input must be a supported type and match the output's data type.
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
673
// Validates a Stack workload: N inputs of identical shape (given by
// m_Parameters.m_InputShape) stacked along m_Parameters.m_Axis into one
// output whose rank is one greater than the inputs'.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    // Dimensions before the stack axis are unchanged by stacking.
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    // The stack axis itself must hold one entry per stacked input.
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    // Dimensions after the stack axis are the input dimensions shifted by one.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // All inputs must share the first input's data type...
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    // ...and so must the output.
    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
767
768 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
769 {
770     const std::string descriptorName{"FullyConnectedQueueDescriptor"};
771
772     ValidateNumInputs(workloadInfo,  descriptorName, 1);
773     ValidateNumOutputs(workloadInfo, descriptorName, 1);
774
775     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
776     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
777
778     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
779
780     if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
781     {
782         throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
783     }
784
785     ValidatePointer(m_Weight, descriptorName, "weight");
786
787     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
788     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
789
790     if (m_Parameters.m_BiasEnabled)
791     {
792         ValidatePointer(m_Bias, descriptorName, "bias");
793
794         // Validates type and quantization values.
795         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
796         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
797
798         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
799         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
800     }
801
802     // Check the supported data types
803     std::vector<DataType> supportedTypes =
804     {
805             DataType::Float32,
806             DataType::Float16,
807             DataType::QuantisedAsymm8,
808             DataType::QuantisedSymm16
809     };
810
811     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
812     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
813 }
814
815 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
816 {
817     const std::string descriptorName{"NormalizationQueueDescriptor"};
818
819     ValidateNumInputs(workloadInfo,  descriptorName, 1);
820     ValidateNumOutputs(workloadInfo, descriptorName, 1);
821
822     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
823     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
824
825     // Check the supported data types
826     std::vector<DataType> supportedTypes =
827     {
828         DataType::Float16,
829         DataType::Float32,
830         DataType::QuantisedAsymm8,
831         DataType::QuantisedSymm16
832     };
833
834     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
835
836     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
837
838     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
839 }
840
841 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
842 {
843     const std::string descriptorName{"AdditionQueueDescriptor"};
844
845     ValidateNumInputs(workloadInfo,  descriptorName, 2);
846     ValidateNumOutputs(workloadInfo, descriptorName, 1);
847
848     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
849     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
850     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
851
852     std::vector<DataType> supportedTypes =
853     {
854         DataType::Float32,
855         DataType::QuantisedAsymm8,
856         DataType::QuantisedSymm16,
857         DataType::Float16
858     };
859
860     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
861     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
862     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
863
864     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
865     ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
866
867     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
868                                        inputTensorInfo1,
869                                        outputTensorInfo,
870                                        descriptorName,
871                                        "input_0",
872                                        "input_1");
873 }
874
875 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
876 {
877     const std::string descriptorName{"MultiplicationQueueDescriptor"};
878
879     ValidateNumInputs(workloadInfo,  descriptorName, 2);
880     ValidateNumOutputs(workloadInfo, descriptorName, 1);
881
882     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
883     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
884     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
885
886     std::vector<DataType> supportedTypes =
887     {
888         DataType::Float32,
889         DataType::QuantisedAsymm8,
890         DataType::QuantisedSymm16,
891         DataType::Float16
892     };
893
894     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
895     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
896     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
897
898     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
899     ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
900
901     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
902                                        inputTensorInfo1,
903                                        outputTensorInfo,
904                                        descriptorName,
905                                        "input_0",
906                                        "input_1");
907 }
908
909 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
910 {
911     const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
912
913     ValidateNumInputs(workloadInfo,  descriptorName, 1);
914     ValidateNumOutputs(workloadInfo, descriptorName, 1);
915
916     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
917     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
918
919     std::vector<DataType> supportedTypes =
920     {
921         DataType::Float16,
922         DataType::Float32,
923         DataType::QuantisedAsymm8,
924         DataType::QuantisedSymm16
925     };
926
927     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
928     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
929
930     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
931     ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
932     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
933
934     ValidatePointer(m_Mean,     descriptorName, "mean");
935     ValidatePointer(m_Variance, descriptorName, "variance");
936     ValidatePointer(m_Beta,     descriptorName, "beta");
937     ValidatePointer(m_Gamma,    descriptorName, "gamma");
938
939     const TensorInfo& mean     = m_Mean->GetTensorInfo();
940     const TensorInfo& variance = m_Variance->GetTensorInfo();
941     const TensorInfo& beta     = m_Beta->GetTensorInfo();
942     const TensorInfo& gamma    = m_Gamma->GetTensorInfo();
943
944     ValidateTensorNumDimensions(mean,     descriptorName, 1, "mean");
945     ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
946     ValidateTensorNumDimensions(beta,     descriptorName, 1, "beta");
947     ValidateTensorNumDimensions(gamma,    descriptorName, 1, "gamma");
948
949     ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
950     ValidateTensorShapesMatch(mean, beta,     descriptorName, "mean", "beta");
951     ValidateTensorShapesMatch(mean, gamma,    descriptorName, "mean", "gamma");
952 }
953
954 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
955 {
956     const std::string descriptorName{"Convolution2dQueueDescriptor"};
957
958     ValidateNumInputs(workloadInfo,  descriptorName, 1);
959     ValidateNumOutputs(workloadInfo, descriptorName, 1);
960
961     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
962     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
963
964     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
965     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
966
967     ValidatePointer(m_Weight, descriptorName, "weight");
968
969     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
970     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
971
972     ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
973
974     if (m_Parameters.m_BiasEnabled)
975     {
976         ValidatePointer(m_Bias, descriptorName, "bias");
977
978         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
979         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
980
981         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
982         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
983     }
984
985     std::vector<DataType> supportedTypes =
986     {
987         DataType::Float32,
988         DataType::QuantisedAsymm8,
989         DataType::QuantisedSymm16,
990         DataType::Float16
991     };
992
993     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
994     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
995 }
996
997 void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
998 {
999     const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1000
1001     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1002     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1003
1004     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1005     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1006
1007     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1008     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1009
1010     ValidatePointer(m_Weight, descriptorName, "weight");
1011
1012     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1013     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1014
1015     if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1016     {
1017         throw InvalidArgumentException(
1018             boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
1019                                      "cannot be smaller than 1.") % descriptorName %
1020                                      m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1021     }
1022
1023     const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1024
1025     // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1026     // inputChannels * channelMultiplier should be equal to outputChannels.
1027     const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1028     const unsigned int numWeightInputChannels     = weightTensorInfo.GetShape()[1];
1029     const unsigned int numWeightOutputChannels    = outputTensorInfo.GetShape()[channelIndex];
1030     if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1031     {
1032         throw InvalidArgumentException(
1033             boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1034                                      "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1035                                      "(provided %4%).") % descriptorName % numWeightOutputChannels %
1036                                      numWeightInputChannels % numWeightChannelMultiplier));
1037     }
1038
1039     ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
1040
1041     if (m_Parameters.m_BiasEnabled)
1042     {
1043         ValidatePointer(m_Bias, descriptorName, "bias");
1044
1045         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1046         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1047
1048         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1049         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1050     }
1051
1052     std::vector<DataType> supportedTypes =
1053     {
1054         DataType::Float32,
1055         DataType::QuantisedAsymm8,
1056         DataType::QuantisedSymm16,
1057         DataType::Float16
1058     };
1059
1060     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1061     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1062 }
1063
1064 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1065 {
1066     const std::string descriptorName{"PermuteQueueDescriptor"};
1067
1068     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1069     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1070
1071     const PermutationVector& mapping = m_Parameters.m_DimMappings;
1072
1073     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1074     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1075
1076     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, mapping.GetSize(), "input");
1077     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1078
1079     for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1080     {
1081         if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1082         {
1083             throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1084                                            " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1085                                            "must match dst dimension " + to_string(mapping[i]) +
1086                                            " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1087         }
1088     }
1089
1090     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1091 }
1092
1093 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1094 {
1095     const std::string descriptorName{"Pooling2dQueueDescriptor"};
1096
1097     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1098     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1099
1100     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1101     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1102
1103     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1104     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1105
1106     std::vector<DataType> supportedTypes =
1107     {
1108         DataType::Float32,
1109         DataType::Float16,
1110         DataType::QuantisedAsymm8,
1111         DataType::QuantisedSymm16
1112     };
1113
1114     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1115     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1116 }
1117
1118 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1119 {
1120     const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1121
1122     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1123     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1124
1125     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1126     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1127
1128     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1129     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1130
1131     std::vector<DataType> supportedTypes =
1132     {
1133         DataType::Float16,
1134         DataType::Float32,
1135         DataType::QuantisedAsymm8,
1136         DataType::QuantisedSymm16
1137     };
1138
1139     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1140     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1141
1142     // ResizeBilinear only changes width and height: batch and channel count must match.
1143     const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
1144     const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1145     if (inputBatchSize != outputBatchSize)
1146     {
1147         throw InvalidArgumentException(
1148             boost::str(boost::format("%1%: Input batch size (%2%) "
1149                 "does not match output batch size (%3%)") %
1150                 descriptorName % inputBatchSize % outputBatchSize));
1151     }
1152
1153     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1154     const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1155     const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1156     if (inputChannelCount != outputChannelCount)
1157     {
1158         throw InvalidArgumentException(
1159             boost::str(boost::format("%1%: Input channel count (%2%) "
1160                 "does not match output channel count (%3%)") %
1161                 descriptorName % inputChannelCount % outputChannelCount));
1162     }
1163 }
1164
1165 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1166 {
1167     const std::string descriptorName{"ResizeQueueDescriptor"};
1168
1169     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1170     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1171
1172     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1173     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1174
1175     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1176     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1177
1178     std::vector<DataType> supportedTypes =
1179     {
1180         DataType::Float16,
1181         DataType::Float32,
1182         DataType::QuantisedAsymm8,
1183         DataType::QuantisedSymm16
1184     };
1185
1186     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1187     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1188
1189     // Resize only changes width and height: batch and channel count must match.
1190     const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
1191     const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1192     if (inputBatchSize != outputBatchSize)
1193     {
1194         throw InvalidArgumentException(
1195                 boost::str(boost::format("%1%: Input batch size (%2%) "
1196                            "does not match output batch size (%3%)") %
1197                            descriptorName % inputBatchSize % outputBatchSize));
1198     }
1199
1200     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1201     const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1202     const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1203     if (inputChannelCount != outputChannelCount)
1204     {
1205         throw InvalidArgumentException(
1206                 boost::str(boost::format("%1%: Input channel count (%2%) "
1207                            "does not match output channel count (%3%)") %
1208                            descriptorName % inputChannelCount % outputChannelCount));
1209     }
1210 }
1211
1212 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1213 {
1214     const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1215
1216     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1217     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1218
1219     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1220     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1221
1222     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 2, "input");
1223     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1224
1225     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo,  descriptorName, "input", "output");
1226
1227     if (m_Parameters.m_Min > m_Parameters.m_Max)
1228     {
1229         throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1230     }
1231 }
1232
1233 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1234 {
1235     const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1236
1237     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1238     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1239
1240     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1241     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1242
1243     if (inputTensorInfo.GetNumDimensions() > 4)
1244     {
1245         throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1246     }
1247
1248     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1249
1250     // Check the supported data types
1251     std::vector<DataType> supportedTypes =
1252     {
1253         DataType::Float32,
1254         DataType::Float16,
1255         DataType::QuantisedAsymm8,
1256         DataType::QuantisedSymm16
1257     };
1258
1259     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1260     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1261
1262     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1263 }
1264
1265 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1266 {
1267     const std::string descriptorName{"ConstantQueueDescriptor"};
1268
1269     ValidateNumInputs(workloadInfo,  descriptorName, 0);
1270     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1271
1272     if (!m_LayerOutput)
1273     {
1274         throw InvalidArgumentException(descriptorName + ": No const input specified.");
1275     }
1276
1277     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1278     ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1279
1280     // Check the supported data types
1281     std::vector<DataType> supportedTypes =
1282     {
1283         DataType::Float32,
1284         DataType::Float16,
1285         DataType::Signed32,
1286         DataType::QuantisedAsymm8,
1287         DataType::QuantisedSymm16
1288     };
1289
1290     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1291 }
1292
1293 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1294 {
1295     const std::string descriptorName{"ReshapeQueueDescriptor"};
1296
1297     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1298     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1299
1300     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1301     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1302
1303     ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1304
1305     // Check the supported data types
1306     std::vector<DataType> supportedTypes =
1307     {
1308         DataType::Float32,
1309         DataType::Float16,
1310         DataType::QuantisedAsymm8,
1311         DataType::QuantisedSymm16
1312     };
1313
1314     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1315     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1316 }
1317
1318 void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1319 {
1320     const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1321
1322     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1323     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1324
1325     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1326     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1327
1328     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1329     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1330
1331     if (m_Parameters.m_BlockShape.size() != 2)
1332     {
1333         throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1334     }
1335
1336     if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1337     {
1338         throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1339                                        "dimensions as Block Shape.");
1340     }
1341
1342     const TensorShape& inputShape = inputTensorInfo.GetShape();
1343
1344     std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1345     std::pair<unsigned int, unsigned int> widthPad  = m_Parameters.m_PadList[1];
1346
1347     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1348
1349     const unsigned int inputWidth  = inputShape[dimensionIndices.GetWidthIndex()] +
1350                                      widthPad.first + widthPad.second;
1351     const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1352                                      heightPad.first + heightPad.second;
1353
1354     const unsigned int numInputElements  = inputShape[0] * inputHeight * inputWidth *
1355                                            inputShape[dimensionIndices.GetChannelsIndex()];
1356     const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1357
1358     if (numOutputElements != numInputElements)
1359     {
1360         throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1361             to_string(numInputElements) + " after padding but output tensor has " +
1362             to_string(numOutputElements) + " elements.");
1363     }
1364
1365     if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1366     {
1367         throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1368                                        "divisible by Block Shape in all spatial dimensions");
1369     }
1370
1371     std::vector<DataType> supportedTypes =
1372     {
1373             DataType::Float16,
1374             DataType::Float32,
1375             DataType::QuantisedAsymm8,
1376             DataType::QuantisedSymm16
1377     };
1378
1379     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1380     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1381 }
1382
1383 void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1384 {
1385     const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1386
1387     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1388     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1389
1390     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1391     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1392
1393     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1394     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1395
1396     std::vector<DataType> supportedTypes =
1397     {
1398         DataType::Float32,
1399         DataType::Float16,
1400         DataType::QuantisedAsymm8,
1401         DataType::QuantisedSymm16
1402     };
1403
1404     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1405     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1406
1407     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1408     const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1409     const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1410     const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1411
1412     const TensorShape& inputShape = inputTensorInfo.GetShape();
1413
1414     const unsigned int numInputElements  =
1415         inputShape[0] * inputShape[wIndex] * inputShape[hIndex] * inputShape[cIndex];
1416     const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1417
1418     if (numOutputElements != numInputElements)
1419     {
1420         throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1421             std::to_string(numInputElements) + " but output tensor has " +
1422             std::to_string(numOutputElements) + " elements.");
1423     }
1424
1425     if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex]  % m_Parameters.m_BlockSize != 0)
1426     {
1427         throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1428                                        "by block size in all spatial dimensions");
1429     }
1430 }
1431
1432 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1433 {
1434     const std::string descriptorName{"FloorQueueDescriptor"};
1435
1436     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1437     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1438
1439     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1440     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1441
1442     std::vector<DataType> supportedTypes =
1443     {
1444         DataType::Float32,
1445         DataType::QuantisedSymm16
1446     };
1447
1448     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1449
1450     if (inputTensorInfo != outputTensorInfo)
1451     {
1452         throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1453     }
1454 }
1455
1456 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1457 {
1458     // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1459
1460     const std::string descriptorName{"LstmQueueDescriptor"};
1461
1462     // check dimensions of all inputs and outputs
1463     if (workloadInfo.m_InputTensorInfos.size() != 3)
1464     {
1465         throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1466     }
1467     if (workloadInfo.m_OutputTensorInfos.size() != 4)
1468     {
1469         throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1470     }
1471
1472     std::vector<DataType> supportedTypes =
1473     {
1474         DataType::Float16,
1475         DataType::Float32,
1476         DataType::QuantisedSymm16
1477     };
1478
1479     // check for supported type of one input and match them with all the other input and output
1480     ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1481
1482     // type matches all other inputs
1483     for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1484     {
1485         ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1486                                      workloadInfo.m_InputTensorInfos[i],
1487                                      descriptorName,
1488                                      "input_0",
1489                                      "input_" + std::to_string(i));
1490     }
1491     // type matches all other outputs
1492     for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1493     {
1494         ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1495                                      workloadInfo.m_OutputTensorInfos[i],
1496                                      "LstmQueueDescriptor",
1497                                      "input_0",
1498                                      "output_" + std::to_string(i));
1499     }
1500
1501     // TODO: check clipping parameter is valid
1502
1503     // Inferring batch size, number of outputs and number of cells from the inputs.
1504     // TODO: figure out if there is a way to make sure the specific inputs are at that index of workloadInfo
1505     const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1506     const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1507     ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1508     const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1509     ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1510     const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1511
1512     // input tensor
1513     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1514                                 descriptorName + " input_0");
1515     // outputStateInTensor
1516     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1517                                 descriptorName + " input_1");
1518     // outputStateInTensor
1519     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1520                                 descriptorName + " input_2");
1521     // scratchBufferTensor
1522     unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1523     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1524                                 descriptorName + " output_0");
1525     // outputStateOutTensor
1526     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1527                                 descriptorName + " output_1");
1528     // cellStateOutTensor
1529     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1530                                 descriptorName + " output_2");
1531     // outputTensor
1532     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1533                                 descriptorName + " output_3");
1534
1535
1536     // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1537     if ( m_InputToInputWeights )
1538     {
1539         ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1540                                       (n_cell * n_input), "InputLayerNormWeights");
1541     }
1542
1543     ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1544     ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1545                                   (n_cell * n_input), "InputToForgetWeights");
1546
1547     ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1548     ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1549                                   (n_cell * n_input), "InputToCellWeights");
1550
1551     if ( m_RecurrentToInputWeights )
1552     {
1553         ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1554                                       (n_cell * n_output), "RecurrentToInputWeights");
1555     }
1556
1557     ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1558     ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1559                                   (n_cell * n_output), "RecurrentToForgetWeights");
1560
1561     ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1562     ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1563                                   (n_cell * n_output), "RecurrentToCellWeights");
1564
1565     // Make sure the input-gate's parameters are either both present (regular
1566     // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1567     bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1568                                      !m_Parameters.m_CifgEnabled) ||
1569                                      (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1570                                      m_Parameters.m_CifgEnabled));
1571     if (!cifg_weights_all_or_none)
1572     {
1573         throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1574                                        "RecurrentToInputWeights must either both be present (regular LSTM) "
1575                                        "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1576                                        "accordingly.");
1577     }
1578
1579     if ( m_CellToInputWeights )
1580     {
1581         ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1582                                       n_cell, "CellToInputWeights");
1583     }
1584     if ( m_CellToForgetWeights )
1585     {
1586         ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1587                                       n_cell, "CellToForgetWeights");
1588     }
1589     if ( m_CellToOutputWeights )
1590     {
1591         ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1592                                       n_cell, "CellToOutputWeights");
1593     }
1594
1595     // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1596     bool peephole_weights_all_or_none =
1597             (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) &&  m_CellToForgetWeights
1598             && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1599             || ( !m_CellToInputWeights && !m_CellToForgetWeights
1600             && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1601     if (!peephole_weights_all_or_none)
1602     {
1603         throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1604     }
1605
1606     // Make sure the input gate bias is present only when not a CIFG-LSTM.
1607     if (m_Parameters.m_CifgEnabled)
1608     {
1609         if (m_InputGateBias)
1610         {
1611             throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1612         }
1613     }
1614     else
1615     {
1616         if (!m_InputGateBias)
1617         {
1618             throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1619                                            "must be present.");
1620         }
1621         ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1622                                       n_cell, "InputGateBias");
1623     }
1624
1625     ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1626     ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1627
1628     ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1629     ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1630
1631     ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1632     ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1633
1634     if (m_ProjectionWeights)
1635     {
1636         ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1637                                       (n_cell * n_output), "ProjectionWeights");
1638     }
1639     if (m_ProjectionBias)
1640     {
1641         ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1642     }
1643
1644     // Making sure the projection tensors are consistent:
1645     // 1) If projection weight is not present, then projection bias should not be
1646     // present.
1647     // 2) If projection weight is present, then projection bias is optional.
1648     bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1649                                         !m_Parameters.m_ProjectionEnabled)
1650                                         || (m_ProjectionWeights && !m_ProjectionBias &&
1651                                         m_Parameters.m_ProjectionEnabled)
1652                                         || (m_ProjectionWeights && m_ProjectionBias &&
1653                                         m_Parameters.m_ProjectionEnabled));
1654     if (!projecton_tensors_consistent)
1655     {
1656         throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1657     }
1658
1659     // The four layer normalization weights either all have values or none of them have values. Additionally, if
1660     // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1661     // either all have values or none of them have values. Layer normalization is used when the values of all the
1662     // layer normalization weights are present
1663     if (m_InputLayerNormWeights)
1664     {
1665         ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1666     }
1667     if (m_ForgetLayerNormWeights)
1668     {
1669         ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1670     }
1671     if (m_CellLayerNormWeights)
1672     {
1673         ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1674     }
1675     if (m_OutputLayerNormWeights)
1676     {
1677         ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1678     }
1679
1680     if (m_Parameters.m_LayerNormEnabled)
1681     {
1682         if (!m_Parameters.m_CifgEnabled)
1683         {
1684             if (!m_InputLayerNormWeights)
1685             {
1686                 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1687                                                "disabled but InputLayerNormWeights are not present");
1688             }
1689             ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1690                                           1, n_cell, "InputLayerNormWeights");
1691         }
1692         else if (m_InputLayerNormWeights)
1693         {
1694             throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1695                                            "enabled");
1696         }
1697
1698         ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1699                         "ForgetLayerNormWeights");
1700         ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1701
1702         ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1703                         "OutputLayerNormWeights");
1704         ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1705
1706         ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1707                         "CellLayerNormWeights");
1708         ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1709     }
1710     else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1711     {
1712         throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
1713                                        "normalisation weights are present.");
1714     }
1715 }
1716
1717 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1718 {
1719     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
1720
1721     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1722     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1723
1724     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1725     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1726
1727     if (inputTensorInfo.GetDataType() != DataType::Float32)
1728     {
1729         throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
1730     }
1731
1732     if (outputTensorInfo.GetDataType() != DataType::Float16)
1733     {
1734         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
1735     }
1736
1737     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1738 }
1739
1740 void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1741 {
1742     const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
1743
1744     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1745     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1746
1747     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1748     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1749
1750     if (inputTensorInfo.GetDataType() != DataType::Float16)
1751     {
1752         throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
1753     }
1754
1755     if (outputTensorInfo.GetDataType() != DataType::Float32)
1756     {
1757         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
1758     }
1759
1760     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1761 }
1762
1763 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1764 {
1765     const std::string descriptorName{"DivisionQueueDescriptor"};
1766
1767     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1768     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1769
1770     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1771     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1772     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1773
1774     std::vector<DataType> supportedTypes =
1775     {
1776         DataType::Float32,
1777         DataType::QuantisedAsymm8,
1778         DataType::QuantisedSymm16,
1779         DataType::Float16
1780     };
1781
1782     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1783     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1784     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1785
1786     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1787                                        inputTensorInfo1,
1788                                        outputTensorInfo,
1789                                        descriptorName,
1790                                        "input_0",
1791                                        "input_1");
1792 }
1793
1794 void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1795 {
1796     const std::string descriptorName{"SubtractionQueueDescriptor"};
1797
1798     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1799     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1800
1801     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1802     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1803     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1804
1805     std::vector<DataType> supportedTypes =
1806     {
1807         DataType::Float32,
1808         DataType::QuantisedAsymm8,
1809         DataType::QuantisedSymm16,
1810         DataType::Float16
1811     };
1812
1813     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1814     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1815     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1816
1817     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1818                                        inputTensorInfo1,
1819                                        outputTensorInfo,
1820                                        descriptorName,
1821                                        "input_0",
1822                                        "input_1");
1823 }
1824
1825 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1826 {
1827     const std::string descriptorName{"MaximumQueueDescriptor"};
1828
1829     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1830     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1831
1832     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1833     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1834     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1835
1836     std::vector<DataType> supportedTypes =
1837     {
1838         DataType::Float16,
1839         DataType::Float32,
1840         DataType::Signed32,
1841         DataType::QuantisedAsymm8,
1842         DataType::QuantisedSymm16
1843     };
1844
1845     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1846     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1847     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1848
1849     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1850                                        inputTensorInfo1,
1851                                        outputTensorInfo,
1852                                        descriptorName,
1853                                        "input_0",
1854                                        "input_1");
1855 }
1856
1857 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1858 {
1859     const std::string descriptorName{"MeanQueueDescriptor"};
1860
1861     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1862     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1863
1864     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1865     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1866
1867     std::vector<DataType> supportedTypes =
1868     {
1869         DataType::Float32,
1870         DataType::Float16,
1871         DataType::QuantisedAsymm8,
1872         DataType::QuantisedSymm16
1873     };
1874
1875     // First check if input tensor data type is supported, then
1876     // check if this data type matches the output tensor data type
1877     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1878     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1879
1880     if (m_Parameters.m_KeepDims)
1881     {
1882         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1883     }
1884     else if (m_Parameters.m_Axis.empty())
1885     {
1886         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
1887     }
1888     else
1889     {
1890         unsigned int outputDim =
1891             inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
1892         ValidateTensorNumDimensions(outputTensorInfo,
1893                                     descriptorName,
1894                                     outputDim > 0 ? outputDim : 1,
1895                                     "output");
1896     }
1897 }
1898
1899 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1900 {
1901     const std::string descriptorName{"PadQueueDescriptor"};
1902
1903     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1904     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1905
1906     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1907     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1908
1909     // input and output should have the same number of dimensions
1910     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1911
1912     // there should be entry in the pad list for each dimension in the input tensor
1913     if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
1914         throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
1915                                        "as there are dimensions in the input tensor that is " +
1916                                        std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
1917                                        " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
1918     }
1919 }
1920
1921 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1922 {
1923     const std::string descriptorName{"QuantizeQueueDescriptor"};
1924
1925     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1926     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1927
1928     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1929     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1930
1931     std::vector<DataType> supportedTypes =
1932     {
1933             DataType::Float32,
1934             DataType::Float16
1935     };
1936
1937     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1938
1939     if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
1940         outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
1941     {
1942         throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
1943     }
1944 }
1945
1946 void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1947 {
1948     const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
1949
1950     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1951     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1952
1953     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1954     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1955
1956     std::vector<DataType> supportedTypes =
1957     {
1958             DataType::Float32,
1959             DataType::Float16,
1960             DataType::QuantisedAsymm8,
1961             DataType::QuantisedSymm16
1962     };
1963
1964     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1965     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1966 }
1967
1968 void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1969 {
1970     const std::string descriptorName{"StridedSliceQueueDescriptor"};
1971
1972     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1973     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1974
1975     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1976     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1977
1978     std::vector<DataType> supportedTypes =
1979     {
1980         DataType::Float16,
1981         DataType::Float32,
1982         DataType::QuantisedAsymm8,
1983         DataType::QuantisedSymm16
1984     };
1985
1986     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1987     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1988
1989     ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1990
1991     const uint32_t rank = inputTensorInfo.GetNumDimensions();
1992     if (rank > 4)
1993     {
1994         throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1995     }
1996
1997     // Begin, End & Stride length must be of rank(input0)
1998     if (m_Parameters.m_Begin.size() != rank)
1999     {
2000         throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2001     }
2002
2003     if (m_Parameters.m_End.size() != rank)
2004     {
2005         throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2006     }
2007
2008     if (m_Parameters.m_Stride.size() != rank)
2009     {
2010         throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
2011     }
2012
2013     // Stride entries must be non-zero
2014     for (auto& stride : m_Parameters.m_Stride)
2015     {
2016         if (stride == 0)
2017         {
2018             throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2019         }
2020     }
2021 }
2022
2023 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2024 {
2025     const std::string descriptorName{"MinimumQueueDescriptor"};
2026
2027     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2028     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2029
2030     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2031     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2032     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2033
2034     std::vector<DataType> supportedTypes =
2035     {
2036         DataType::Float16,
2037         DataType::Float32,
2038         DataType::Signed32,
2039         DataType::QuantisedAsymm8,
2040         DataType::QuantisedSymm16
2041     };
2042
2043     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2044     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2045     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2046
2047     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2048                                        inputTensorInfo1,
2049                                        outputTensorInfo,
2050                                        descriptorName,
2051                                        "input_0",
2052                                        "input_1");
2053 }
2054
2055 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2056 {
2057     const std::string descriptorName{"DebugQueueDescriptor"};
2058
2059     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2060     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2061 }
2062
2063 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2064 {
2065     const std::string descriptorName{"EqualQueueDescriptor"};
2066
2067     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2068     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2069
2070     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2071     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2072     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2073
2074     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2075                                        inputTensorInfo1,
2076                                        outputTensorInfo,
2077                                        descriptorName,
2078                                        "input_0",
2079                                        "input_1");
2080
2081     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2082     {
2083         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2084     }
2085 }
2086
2087 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2088 {
2089     const std::string descriptorName{"GreaterQueueDescriptor"};
2090
2091     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2092     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2093
2094     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2095     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2096     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2097
2098     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2099                                        inputTensorInfo1,
2100                                        outputTensorInfo,
2101                                        descriptorName,
2102                                        "input_0",
2103                                        "input_1");
2104
2105     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2106     {
2107         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2108     }
2109 }
2110
2111 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2112 {
2113     const std::string descriptorName{"RsqrtQueueDescriptor"};
2114
2115     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2116     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2117
2118     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2119     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2120
2121     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2122
2123     std::vector<DataType> supportedTypes =
2124     {
2125             DataType::Float16,
2126             DataType::Float32,
2127             DataType::QuantisedAsymm8,
2128             DataType::QuantisedSymm16
2129     };
2130
2131     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2132     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2133 }
2134
2135 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2136 {
2137     const std::string descriptorName{"GatherQueueDescriptor"};
2138
2139     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2140     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2141
2142     const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2143     if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2144     {
2145         throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2146     }
2147
2148     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2149     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2150
2151     std::vector<DataType> supportedTypes =
2152     {
2153             DataType::Float16,
2154             DataType::Float32,
2155             DataType::QuantisedAsymm8,
2156             DataType::QuantisedSymm16
2157     };
2158
2159     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2160
2161     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2162
2163     unsigned int outputDim  = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2164     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2165 }
2166
2167 void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2168 {
2169     const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2170
2171     ValidateNumInputs(workloadInfo, descriptorName, 2);
2172
2173     if (workloadInfo.m_OutputTensorInfos.size() != 4)
2174     {
2175         throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2176                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2177     }
2178
2179     if (m_Anchors == nullptr)
2180     {
2181         throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2182     }
2183
2184     const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
2185     const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
2186     const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();
2187
2188     const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
2189     const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2190     const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
2191     const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];
2192
2193     ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2194     ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2195     ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2196
2197     const std::vector<DataType> supportedInputTypes =
2198     {
2199         DataType::Float32,
2200         DataType::QuantisedAsymm8,
2201         DataType::QuantisedSymm16
2202     };
2203
2204     ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2205     ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2206     ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2207
2208     ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2209     ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2210     ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2211     ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2212
2213     // NOTE: Output is always Float32 regardless of input type
2214     ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2215     ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2216     ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2217     ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2218
2219     if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2220     {
2221         throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2222                                        "must be positive and less than or equal to 1.");
2223     }
2224
2225     if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2226     {
2227         throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2228                                        "should be equal to number of classes + 1.");
2229     }
2230 }
2231
2232 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2233 {
2234     const std::string& descriptorName{"DequantizeQueueDescriptor"};
2235
2236     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2237     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2238
2239     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2240     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2241
2242     if (inputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
2243         inputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
2244     {
2245         throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2246     }
2247
2248     std::vector<DataType> supportedTypes =
2249     {
2250             DataType::Float32,
2251             DataType::Float16
2252     };
2253
2254     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2255 }
2256
2257 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2258 {
2259     const std::string& descriptorName{"MergeQueueDescriptor"};
2260
2261     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2262     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2263
2264     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2265     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2266     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2267
2268     ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2269     ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2270
2271     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2272     ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2273 }
2274
2275 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2276 {
2277     const std::string& descriptorName{"SwitchQueueDescriptor"};
2278
2279     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2280     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2281
2282     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2283     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2284
2285     const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2286     const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2287
2288     std::vector<DataType> supportedTypes =
2289     {
2290         DataType::Float32,
2291         DataType::QuantisedAsymm8,
2292         DataType::QuantisedSymm16
2293     };
2294
2295     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2296     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2297
2298     ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2299     ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2300
2301     ValidateTensorShapesMatch(inputTensorInfo0,
2302                               outputTensorInfo0,
2303                               descriptorName,
2304                               "input_0",
2305                               "output_0");
2306
2307     ValidateTensorShapesMatch(inputTensorInfo0,
2308                               outputTensorInfo1,
2309                               descriptorName,
2310                               "input_0",
2311                               "output_1");
2312 }
2313
2314 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2315 {
2316     // This is internally generated so it should not need validation.
2317 }
2318
2319 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2320 {
2321     const std::string& descriptorName{"PreluQueueDescriptor"};
2322
2323     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2324     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2325
2326     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2327     const TensorInfo& alphaTensorInfo  = workloadInfo.m_InputTensorInfos[1];
2328     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2329
2330     std::vector<DataType> supportedTypes
2331     {
2332         DataType::Float16,
2333         DataType::Float32,
2334         DataType::QuantisedAsymm8,
2335         DataType::QuantisedSymm16
2336     };
2337
2338     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2339     ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2340
2341     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2342
2343     ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo,  descriptorName, "input", "alpha");
2344     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2345
2346     ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2347                                        alphaTensorInfo,
2348                                        outputTensorInfo,
2349                                        descriptorName,
2350                                        "input",
2351                                        "alpha");
2352 }
2353
2354 void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2355 {
2356     const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2357
2358     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2359     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2360
2361     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2362     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2363
2364     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
2365     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2366
2367     ValidatePointer(m_Weight, descriptorName, "weight");
2368
2369     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2370     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2371     ValidateTensorDataType(weightTensorInfo, inputTensorInfo.GetDataType(), descriptorName, "weight");
2372
2373     if (m_Parameters.m_BiasEnabled)
2374     {
2375         ValidatePointer(m_Bias, descriptorName, "bias");
2376
2377         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
2378         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
2379
2380         ValidateTensorDataType(biasTensorInfo,
2381                                GetBiasDataType(inputTensorInfo.GetDataType()),
2382                                descriptorName,
2383                                "bias");
2384
2385         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2386     }
2387 }
2388
2389 void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2390 {
2391     const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2392
2393     // Validate number of inputs/outputs
2394     ValidateNumInputs(workloadInfo,  descriptorName, 3);
2395     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2396
2397     // Input/output tensor infos
2398     auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2399     auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2400     auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2401
2402     auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2403     auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2404
2405     std::vector<DataType> inputOutputSupportedTypes =
2406     {
2407         DataType::QuantisedAsymm8
2408     };
2409
2410     std::vector<DataType> cellStateSupportedTypes =
2411     {
2412         DataType::QuantisedSymm16
2413     };
2414
2415     std::vector<DataType> weightsSupportedTypes =
2416     {
2417         DataType::QuantisedAsymm8
2418     };
2419
2420     std::vector<DataType> biasSupportedTypes =
2421     {
2422         DataType::Signed32
2423     };
2424
2425     // Validate types of input/output tensors
2426     ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2427     ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2428     ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2429
2430     ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2431     ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2432
2433     // Validate matching types of input/output tensors
2434     ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2435     ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2436                                  "outputStateIn", "outputStateOut");
2437     ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2438
2439     // Validate matching quantization info for input/output tensors
2440     ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2441     ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2442     ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2443     
2444     // Infer number of batches, input size and output size from tensor dimensions
2445     const uint32_t numBatches = inputInfo.GetShape()[0];
2446     const uint32_t inputSize  = inputInfo.GetShape()[1];
2447     const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2448
2449     // Validate number of dimensions and number of elements for input/output tensors
2450     ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2451     ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2452     ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2453     ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2454     ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2455
2456     // Validate number of dimensions and number of elements for weights tensors
2457     ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2458     auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2459     ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2460
2461     ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2462     auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2463     ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2464
2465     ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2466     auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2467     ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2468
2469     ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2470     auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2471     ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2472
2473     ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2474     auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2475     ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2476
2477     ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2478     auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2479     ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2480                                 " RecurrentToForgetWeights");
2481
2482     ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2483     auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2484     ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2485
2486     ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2487     auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2488     ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2489
2490     // Validate data types for weights tensors (all should match each other)
2491     ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2492
2493     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2494                                  "inputToInputWeights", "inputToForgetWeights");
2495     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2496                                  "inputToInputWeights", "inputToCellWeights");
2497     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2498                                  "inputToInputWeights", "inputToOutputWeights");
2499
2500     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2501                                  "inputToInputWeights", "recurrentToInputWeights");
2502     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2503                                  "inputToInputWeights", "recurrentToForgeteights");
2504     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2505                                  "inputToInputWeights", "recurrentToCellWeights");
2506     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2507                                  "inputToInputWeights", "recurrentToOutputWeights");
2508
2509     // Validate matching quantization info for weight tensors (all should match each other)
2510     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2511                                     descriptorName, "inputToInputWeights", "inputToForgetWeights");
2512     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2513                                     descriptorName, "inputToInputWeights", "inputToCellWeights");
2514     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2515                                     descriptorName, "inputToInputWeights", "inputToOutputWeights");
2516
2517     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2518                                     descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2519     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2520                                     descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2521     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2522                                     descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2523     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2524                                     descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2525
2526     // Validate number of dimensions and number of elements in bias tensors
2527     ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2528     auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2529     ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2530
2531     ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2532     auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2533     ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2534
2535     ValidatePointer(m_CellBias, descriptorName, "CellBias");
2536     auto cellBiasInfo = m_CellBias->GetTensorInfo();
2537     ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2538
2539     ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2540     auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2541     ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2542
2543     // Validate data types for bias tensors (all should match each other)
2544     ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2545
2546     ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2547                                  "inputGateBias", "forgetGateBias");
2548     ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2549                                  "inputGateBias", "cellBias");
2550     ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2551                                  "inputGateBias", "outputGateBias");
2552
2553     // Validate bias tensor quantization info
2554     ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2555     ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2556     ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2557     ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2558 }
2559
2560 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2561 {
2562     const std::string descriptorName{"AbsQueueDescriptor"};
2563
2564     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2565     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2566
2567     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2568     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2569
2570     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2571
2572     std::vector<DataType> supportedTypes =
2573         {
2574             DataType::Float16,
2575             DataType::Float32,
2576             DataType::QuantisedAsymm8,
2577             DataType::QuantisedSymm16
2578         };
2579
2580     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2581     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2582 }
2583
2584 } // namespace armnn