fed159bd606016c6aa9a5edba9ef4e36d0279323
[platform/upstream/armnn.git] / src / backends / backendsCommon / WorkloadData.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "WorkloadData.hpp"
6
7 #include "CpuTensorHandle.hpp"
8
9 #include <DataLayoutIndexed.hpp>
10
11 #include <algorithm>
12 #include <iomanip>
13 #include <string>
14 #include <sstream>
15
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
18
19 using namespace armnnUtils;
20
21 namespace armnn
22 {
23
24 //---------------------------------------------------------------
25 DataType GetBiasDataType(DataType inputDataType)
26 {
27     switch (inputDataType)
28     {
29         case DataType::Float16:
30             return DataType::Float16;
31         case DataType::Float32:
32             return DataType::Float32;
33         case DataType::QuantisedAsymm8:
34             return DataType::Signed32;
35         case DataType::QuantisedSymm16:
36             return DataType::Signed32;
37         default:
38             BOOST_ASSERT_MSG(false, "Invalid input data type");
39             return DataType::Float32;
40     }
41 }
42
43 namespace
44 {
45
//---------------------------------------------------------------
// The Android NDK does not provide std::to_string, so emulate it
// with a string stream for any streamable type.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
55
56 //---------------------------------------------------------------
57 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
58 {
59     if (!ptr)
60     {
61         throw InvalidArgumentException(descName +  ": Invalid null pointer. The " +
62                                       paramName + " parameter must be set.");
63     }
64 }
65
66 //---------------------------------------------------------------
67 void ValidateTensorShapesMatch(const TensorInfo& first,
68                                const TensorInfo& second,
69                                std::string const& descName,
70                                std::string const& firstName,
71                                std::string const& secondName)
72 {
73     if (first.GetShape() != second.GetShape())
74     {
75         throw InvalidArgumentException(descName + ": "
76                                        + firstName + " & " + secondName + " must have identical shapes");
77     }
78 }
79
80 //---------------------------------------------------------------
81 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
82 {
83     if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
84     {
85         throw InvalidArgumentException(descName +
86                                        ": Requires exactly " + to_string(expectedSize) + "input(s). " +
87                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
88     }
89 }
90
91 //---------------------------------------------------------------
92 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
93 {
94     if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
95     {
96         throw InvalidArgumentException(descName +
97                                        ": Requires exactly " + to_string(expectedSize) + " output(s). " +
98                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
99     }
100 }
101
102 //---------------------------------------------------------------
103 void ValidateTensorNumDimensions(const TensorInfo& tensor,
104                                  std::string const& descName,
105                                  unsigned int numDimensions,
106                                  std::string const& tensorName)
107 {
108     if (tensor.GetNumDimensions() != numDimensions)
109     {
110         throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111             to_string(tensor.GetNumDimensions()) + " dimensions for " +
112             tensorName + " tensor.");
113     }
114 }
115
116 //---------------------------------------------------------------
117 void ValidateTensorNumElements(const TensorInfo& tensor,
118                                std::string const& descName,
119                                unsigned int numElements,
120                                std::string const& tensorName)
121 {
122     if (tensor.GetNumElements() != numElements)
123     {
124         throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
125                                        to_string(tensor.GetNumElements()) + " elements for " +
126                                        tensorName + " tensor.");
127     }
128 }
129
130 //---------------------------------------------------------------
131 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
132                                  unsigned int numDimension,
133                                  unsigned int numElements,
134                                  std::string const& tensorName)
135 {
136     const std::string functionName{"ValidateTensorNumDimNumElem"};
137     ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
138     ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
139 }
140
141 //---------------------------------------------------------------
142 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
143     const std::string& descName, std::string const& tensorName)
144 {
145     if (tensor.GetDataType() != dataType)
146     {
147         throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
148             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
149     }
150 }
151
//---------------------------------------------------------------
// Checks that two quantized tensors live in the same quantization space:
// same quantized data type AND same scale/offset pair. Tensors that are
// not quantized are accepted without any further checks.
void ValidateTensorQuantizationSpace(const TensorInfo& first,
                                     const TensorInfo& second,
                                     const std::string& descName,
                                     std::string const& firstName,
                                     std::string const& secondName)
{
    if (!first.IsQuantized() ||
        !second.IsQuantized())
    {
        // Not a quantized type, ignore the validation
        return;
    }

    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    // Both tensors must use the same quantized data type...
    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    // ...and the same scale/offset pair (delegated to IsTypeSpaceMatch).
    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
                                       " and scale " + to_string(first.GetQuantizationScale()) + ", " +
                                       secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
                                       " and scale " + to_string(second.GetQuantizationScale()));
    }
}
187
//---------------------------------------------------------------
// Validates the quantization parameters of a bias tensor: the offset must
// be zero, and the scale must equal inputScale * weightsScale (the standard
// requirement for quantized convolution/fully-connected bias terms).
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    if (biasTensor.GetQuantizationOffset() != 0)
    {
        throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
            to_string(biasTensor.GetQuantizationOffset()));
    }
    const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
    // Compare with a small absolute tolerance rather than == to absorb
    // float rounding in the product above.
    if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
    {
        // Print the float values with extra precision to see very small differences
        std::stringstream msg;
        msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
            " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
            biasTensor.GetQuantizationScale();
        throw InvalidArgumentException(msg.str());
    }
}
210
211 //---------------------------------------------------------------
212 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
213     unsigned int numExpected,
214     const std::string& descName,
215     const std::string& varName)
216 {
217     if (vec.empty() && numExpected > 0)
218     {
219         throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
220     }
221
222     for (unsigned int i = 0; i < numExpected; ++i)
223     {
224         if (!vec[i])
225         {
226             throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
227         }
228     }
229 }
230
//---------------------------------------------------------------
// Validates that 'first' and 'second' are broadcast-compatible (same rank,
// and each dimension pair either equal or one of them 1), and that the
// broadcast result shape matches 'output'.
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
    // broadcasted.
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName  + ": Tensors "
            + firstName + " & " + secondName
            + " must have the same number of dimensions in order to be broadcasted");
    }
    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        // Dimensions are compatible when equal, or when either is 1
        // (the size-1 dimension is stretched to the other's size).
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
        }
        // The broadcast result takes the larger extent of each dimension pair.
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }
    TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
267
268 //---------------------------------------------------------------
269 void ValidateDataTypes(const TensorInfo& info,
270                        const std::vector<armnn::DataType>& supportedTypes,
271                        std::string const& descName)
272 {
273     auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
274     if (iterator == supportedTypes.end())
275     {
276         throw InvalidArgumentException(descName  + ": " + " Tensor type is not supported.");
277     }
278 }
279
280 //---------------------------------------------------------------
281 void ValidateTensorDataTypesMatch(const TensorInfo& first,
282                                   const TensorInfo& second,
283                                   std::string const& descName,
284                                   std::string const& firstName,
285                                   std::string const& secondName)
286 {
287     if (first.GetDataType() != second.GetDataType())
288     {
289         throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
290                                        " must have identical data types.");
291     }
292 }
293
294 //---------------------------------------------------------------
295 void ValidateTensorNumElementsMatch(const TensorInfo& first,
296                                     const TensorInfo& second,
297                                     std::string const& descName,
298                                     std::string const& firstName,
299                                     std::string const& secondName)
300 {
301     if (first.GetNumElements() != second.GetNumElements())
302     {
303         throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
304                                        " must have the same number of elements.");
305     }
306 }
307
308 } // anonymous namespace
309
310 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
311     unsigned int numExpectedIn, unsigned int numExpectedOut) const
312 {
313     ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
314     ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
315 }
316
//---------------------------------------------------------------
// Validates a memory-copy workload: one input and one output tensor info,
// matching element counts and data types, and pairwise non-null handles.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo,  descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName , 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // A copy only requires equal element counts and types; shapes may differ.
    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    // Handles are copied pairwise, so the two arrays must have equal length.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
                          descriptorName % m_Inputs.size() % m_Outputs.size()));
    }

    // Every input/output handle pair must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
                                                      descriptorName % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
                                                      descriptorName % i));
        }
    }
}
353
//---------------------------------------------------------------
// Validates a memory-import workload: exactly one input info/handle, equal
// numbers of input and output infos/handles, matching element counts per
// pair, and non-null handles throughout.
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);

    // NOTE(review): this repeats the size==1 condition already enforced by
    // ValidateNumInputs above, but with a different message/format.
    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) is not 1.")
            % workloadInfo.m_InputTensorInfos.size()));

    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
            % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Each imported tensor must hold the same number of elements as the
    // output it maps onto.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                % i ));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) is not 1.")
            % m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
            % m_Inputs.size() % m_Outputs.size()));
    }

    // All handles, input and output, must be non-null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
413
414 //---------------------------------------------------------------
415 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
416 {
417     ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
418     ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
419
420     if (m_Inputs.size() != 1)
421     {
422         throw InvalidArgumentException(boost::str(
423             boost::format("Number of inputs (%1%) is not 1.")
424             % m_Inputs.size()));
425     }
426
427     if (m_Outputs.size() != 0)
428     {
429         throw InvalidArgumentException(boost::str(
430             boost::format("Number of outputs (%1%) is not 0.")
431             % m_Inputs.size() % m_Outputs.size()));
432     }
433
434     if (!m_Inputs[0])
435     {
436         throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
437     }
438 }
439
440 //---------------------------------------------------------------
441 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
442 {
443     const std::string descriptorName{"ActivationQueueDescriptor"};
444
445     ValidateNumInputs(workloadInfo,  descriptorName, 1);
446     ValidateNumOutputs(workloadInfo, descriptorName, 1);
447
448     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
449     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
450
451     std::vector<DataType> supportedTypes =
452     {
453             DataType::Float16,
454             DataType::Float32,
455             DataType::QuantisedAsymm8,
456             DataType::QuantisedSymm16
457     };
458
459     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
460     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
461     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
462 }
463
464 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
465 {
466     const std::string descriptorName{"SoftmaxQueueDescriptor"};
467
468     ValidateNumInputs(workloadInfo,  descriptorName, 1);
469     ValidateNumOutputs(workloadInfo, descriptorName, 1);
470
471     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
472     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
473
474     std::vector<DataType> supportedTypes =
475     {
476             DataType::Float16,
477             DataType::Float32,
478             DataType::QuantisedAsymm8,
479             DataType::QuantisedSymm16
480     };
481
482     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
483     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
484     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
485 }
486
// Validates a splitter workload: one input split into N outputs according
// to per-output view origins. Checks output data types, window count,
// window dimensionality and that every window fits inside the input.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    // Every output must use a supported type and match the input's type.
    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    // NOTE(review): this emptiness check runs after the loop above; with
    // zero outputs the loop body never executes, so the ordering is safe.
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    // One split window (view origin) is required per output tensor.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of input is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window (origin + output extent) must lie fully inside the input.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
556
// Validates a concatenation workload: N inputs merged into one output
// according to per-input view origins. Checks counts, the concat axis,
// window dimensionality/bounds, and input/output data types.
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
    }

    // The concat axis must lie within the input's dimensionality.
    if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    // NOTE(review): when concatenating along the innermost dimension the
    // window checks below are skipped entirely — including the data-type
    // validation at the end of this function.
    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    // One merge window (view origin) is required per input tensor.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        //Checks that the dimensionality of output is same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        //Checks that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    // Every input must use a supported type and match the output's type.
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
    for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
650
// Validates a stack workload: N equally-shaped inputs stacked along a new
// axis into a single output whose rank is one higher than the inputs'.
// Checks input count/shape, axis bounds, the inferred output shape, and
// data-type consistency across inputs and output.
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
    }

    // All inputs must have the same shape, which is defined in parameters
    const TensorShape& inputShape = m_Parameters.m_InputShape;
    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
        {
            throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
        }
    }

    if (inputShape.GetNumDimensions() > 4)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
    }

    // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
    // since the output tensor has an additional dimension.
    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    // Output shape must be as inferred from the input shape
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
    // Dimensions before the stack axis are copied from the input unchanged.
    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    // The new axis itself has one entry per stacked input.
    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                       "match shape inferred from input tensor.");
    }

    // Dimensions after the stack axis are the input's, shifted up by one.
    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor must "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape.GetNumDimensions() > 5)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
    }

    // Check the supported data types
    std::vector<DataType> supportedTypes =
    {
            DataType::Float32,
            DataType::Float16,
            DataType::Boolean,
            DataType::Signed32,
            DataType::QuantisedAsymm8,
            DataType::QuantisedSymm16
    };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    // Remaining inputs and the output must match the first input's type.
    for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName,
                                     "input_0",
                                     "input_" + std::to_string(i));
    }

    ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                 workloadInfo.m_OutputTensorInfos[0],
                                 descriptorName,
                                 "input_0",
                                 "output");
}
744
745 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
746 {
747     const std::string descriptorName{"FullyConnectedQueueDescriptor"};
748
749     ValidateNumInputs(workloadInfo,  descriptorName, 1);
750     ValidateNumOutputs(workloadInfo, descriptorName, 1);
751
752     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
753     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
754
755     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
756
757     if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
758     {
759         throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
760     }
761
762     ValidatePointer(m_Weight, descriptorName, "weight");
763
764     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
765     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
766
767     if (m_Parameters.m_BiasEnabled)
768     {
769         ValidatePointer(m_Bias, descriptorName, "bias");
770
771         // Validates type and quantization values.
772         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
773         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
774
775         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
776         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
777     }
778
779     // Check the supported data types
780     std::vector<DataType> supportedTypes =
781     {
782             DataType::Float32,
783             DataType::Float16,
784             DataType::QuantisedAsymm8,
785             DataType::QuantisedSymm16
786     };
787
788     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
789     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
790 }
791
792 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
793 {
794     const std::string descriptorName{"NormalizationQueueDescriptor"};
795
796     ValidateNumInputs(workloadInfo,  descriptorName, 1);
797     ValidateNumOutputs(workloadInfo, descriptorName, 1);
798
799     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
800     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
801
802     // Check the supported data types
803     std::vector<DataType> supportedTypes =
804     {
805         DataType::Float16,
806         DataType::Float32,
807         DataType::QuantisedAsymm8,
808         DataType::QuantisedSymm16
809     };
810
811     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
812
813     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
814
815     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
816 }
817
818 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
819 {
820     const std::string descriptorName{"AdditionQueueDescriptor"};
821
822     ValidateNumInputs(workloadInfo,  descriptorName, 2);
823     ValidateNumOutputs(workloadInfo, descriptorName, 1);
824
825     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
826     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
827     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
828
829     std::vector<DataType> supportedTypes =
830     {
831         DataType::Float32,
832         DataType::QuantisedAsymm8,
833         DataType::QuantisedSymm16,
834         DataType::Float16
835     };
836
837     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
838     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
839     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
840
841     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
842     ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
843
844     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
845                                        inputTensorInfo1,
846                                        outputTensorInfo,
847                                        descriptorName,
848                                        "input_0",
849                                        "input_1");
850 }
851
852 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
853 {
854     const std::string descriptorName{"MultiplicationQueueDescriptor"};
855
856     ValidateNumInputs(workloadInfo,  descriptorName, 2);
857     ValidateNumOutputs(workloadInfo, descriptorName, 1);
858
859     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
860     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
861     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
862
863     std::vector<DataType> supportedTypes =
864     {
865         DataType::Float32,
866         DataType::QuantisedAsymm8,
867         DataType::QuantisedSymm16,
868         DataType::Float16
869     };
870
871     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
872     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
873     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
874
875     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
876     ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
877
878     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
879                                        inputTensorInfo1,
880                                        outputTensorInfo,
881                                        descriptorName,
882                                        "input_0",
883                                        "input_1");
884 }
885
886 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
887 {
888     const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
889
890     ValidateNumInputs(workloadInfo,  descriptorName, 1);
891     ValidateNumOutputs(workloadInfo, descriptorName, 1);
892
893     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
894     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
895
896     std::vector<DataType> supportedTypes =
897     {
898         DataType::Float16,
899         DataType::Float32,
900         DataType::QuantisedAsymm8,
901         DataType::QuantisedSymm16
902     };
903
904     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
905     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
906
907     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
908     ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
909     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
910
911     ValidatePointer(m_Mean,     descriptorName, "mean");
912     ValidatePointer(m_Variance, descriptorName, "variance");
913     ValidatePointer(m_Beta,     descriptorName, "beta");
914     ValidatePointer(m_Gamma,    descriptorName, "gamma");
915
916     const TensorInfo& mean     = m_Mean->GetTensorInfo();
917     const TensorInfo& variance = m_Variance->GetTensorInfo();
918     const TensorInfo& beta     = m_Beta->GetTensorInfo();
919     const TensorInfo& gamma    = m_Gamma->GetTensorInfo();
920
921     ValidateTensorNumDimensions(mean,     descriptorName, 1, "mean");
922     ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
923     ValidateTensorNumDimensions(beta,     descriptorName, 1, "beta");
924     ValidateTensorNumDimensions(gamma,    descriptorName, 1, "gamma");
925
926     ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
927     ValidateTensorShapesMatch(mean, beta,     descriptorName, "mean", "beta");
928     ValidateTensorShapesMatch(mean, gamma,    descriptorName, "mean", "gamma");
929 }
930
931 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
932 {
933     const std::string descriptorName{"Convolution2dQueueDescriptor"};
934
935     ValidateNumInputs(workloadInfo,  descriptorName, 1);
936     ValidateNumOutputs(workloadInfo, descriptorName, 1);
937
938     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
939     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
940
941     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
942     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
943
944     ValidatePointer(m_Weight, descriptorName, "weight");
945
946     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
947     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
948
949     ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
950
951     if (m_Parameters.m_BiasEnabled)
952     {
953         ValidatePointer(m_Bias, descriptorName, "bias");
954
955         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
956         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
957
958         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
959         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
960     }
961
962     std::vector<DataType> supportedTypes =
963     {
964         DataType::Float32,
965         DataType::QuantisedAsymm8,
966         DataType::QuantisedSymm16,
967         DataType::Float16
968     };
969
970     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
971     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
972 }
973
974 void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
975 {
976     const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
977
978     ValidateNumInputs(workloadInfo,  descriptorName, 1);
979     ValidateNumOutputs(workloadInfo, descriptorName, 1);
980
981     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
982     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
983
984     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
985     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
986
987     ValidatePointer(m_Weight, descriptorName, "weight");
988
989     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
990     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
991
992     if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
993     {
994         throw InvalidArgumentException(
995             boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
996                                      "cannot be smaller than 1.") % descriptorName %
997                                      m_Parameters.m_DilationX % m_Parameters.m_DilationX));
998     }
999
1000     const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1001
1002     // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1003     // inputChannels * channelMultiplier should be equal to outputChannels.
1004     const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1005     const unsigned int numWeightInputChannels     = weightTensorInfo.GetShape()[1];
1006     const unsigned int numWeightOutputChannels    = outputTensorInfo.GetShape()[channelIndex];
1007     if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1008     {
1009         throw InvalidArgumentException(
1010             boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1011                                      "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1012                                      "(provided %4%).") % descriptorName % numWeightOutputChannels %
1013                                      numWeightInputChannels % numWeightChannelMultiplier));
1014     }
1015
1016     ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
1017
1018     if (m_Parameters.m_BiasEnabled)
1019     {
1020         ValidatePointer(m_Bias, descriptorName, "bias");
1021
1022         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1023         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1024
1025         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1026         ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1027     }
1028
1029     std::vector<DataType> supportedTypes =
1030     {
1031         DataType::Float32,
1032         DataType::QuantisedAsymm8,
1033         DataType::QuantisedSymm16,
1034         DataType::Float16
1035     };
1036
1037     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1038     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1039 }
1040
1041 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1042 {
1043     const std::string descriptorName{"PermuteQueueDescriptor"};
1044
1045     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1046     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1047
1048     const PermutationVector& mapping = m_Parameters.m_DimMappings;
1049
1050     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1051     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1052
1053     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, mapping.GetSize(), "input");
1054     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1055
1056     for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1057     {
1058         if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1059         {
1060             throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1061                                            " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1062                                            "must match dst dimension " + to_string(mapping[i]) +
1063                                            " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1064         }
1065     }
1066
1067     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1068 }
1069
1070 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1071 {
1072     const std::string descriptorName{"Pooling2dQueueDescriptor"};
1073
1074     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1075     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1076
1077     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1078     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1079
1080     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1081     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1082
1083     std::vector<DataType> supportedTypes =
1084     {
1085         DataType::Float32,
1086         DataType::Float16,
1087         DataType::QuantisedAsymm8,
1088         DataType::QuantisedSymm16
1089     };
1090
1091     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1092     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1093 }
1094
1095 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1096 {
1097     const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1098
1099     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1100     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1101
1102     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1103     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1104
1105     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1106     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1107
1108     std::vector<DataType> supportedTypes =
1109     {
1110         DataType::Float16,
1111         DataType::Float32,
1112         DataType::QuantisedAsymm8,
1113         DataType::QuantisedSymm16
1114     };
1115
1116     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1117     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1118
1119     // ResizeBilinear only changes width and height: batch and channel count must match.
1120     const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
1121     const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1122     if (inputBatchSize != outputBatchSize)
1123     {
1124         throw InvalidArgumentException(
1125             boost::str(boost::format("%1%: Input batch size (%2%) "
1126                 "does not match output batch size (%3%)") %
1127                 descriptorName % inputBatchSize % outputBatchSize));
1128     }
1129
1130     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1131     const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1132     const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1133     if (inputChannelCount != outputChannelCount)
1134     {
1135         throw InvalidArgumentException(
1136             boost::str(boost::format("%1%: Input channel count (%2%) "
1137                 "does not match output channel count (%3%)") %
1138                 descriptorName % inputChannelCount % outputChannelCount));
1139     }
1140 }
1141
1142 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1143 {
1144     const std::string descriptorName{"ResizeQueueDescriptor"};
1145
1146     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1147     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1148
1149     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1150     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1151
1152     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1153     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1154
1155     std::vector<DataType> supportedTypes =
1156     {
1157         DataType::Float16,
1158         DataType::Float32,
1159         DataType::QuantisedAsymm8,
1160         DataType::QuantisedSymm16
1161     };
1162
1163     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1164     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1165
1166     // Resize only changes width and height: batch and channel count must match.
1167     const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
1168     const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1169     if (inputBatchSize != outputBatchSize)
1170     {
1171         throw InvalidArgumentException(
1172                 boost::str(boost::format("%1%: Input batch size (%2%) "
1173                            "does not match output batch size (%3%)") %
1174                            descriptorName % inputBatchSize % outputBatchSize));
1175     }
1176
1177     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1178     const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1179     const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1180     if (inputChannelCount != outputChannelCount)
1181     {
1182         throw InvalidArgumentException(
1183                 boost::str(boost::format("%1%: Input channel count (%2%) "
1184                            "does not match output channel count (%3%)") %
1185                            descriptorName % inputChannelCount % outputChannelCount));
1186     }
1187 }
1188
1189 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1190 {
1191     const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1192
1193     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1194     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1195
1196     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1197     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1198
1199     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 2, "input");
1200     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1201
1202     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo,  descriptorName, "input", "output");
1203
1204     if (m_Parameters.m_Min > m_Parameters.m_Max)
1205     {
1206         throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1207     }
1208 }
1209
1210 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1211 {
1212     const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1213
1214     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1215     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1216
1217     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1218     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1219
1220     if (inputTensorInfo.GetNumDimensions() > 4)
1221     {
1222         throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1223     }
1224
1225     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1226
1227     // Check the supported data types
1228     std::vector<DataType> supportedTypes =
1229     {
1230         DataType::Float32,
1231         DataType::Float16,
1232         DataType::QuantisedAsymm8,
1233         DataType::QuantisedSymm16
1234     };
1235
1236     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1237     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1238
1239     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1240 }
1241
1242 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1243 {
1244     const std::string descriptorName{"ConstantQueueDescriptor"};
1245
1246     ValidateNumInputs(workloadInfo,  descriptorName, 0);
1247     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1248
1249     if (!m_LayerOutput)
1250     {
1251         throw InvalidArgumentException(descriptorName + ": No const input specified.");
1252     }
1253
1254     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1255     ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1256
1257     // Check the supported data types
1258     std::vector<DataType> supportedTypes =
1259     {
1260         DataType::Float32,
1261         DataType::Float16,
1262         DataType::Signed32,
1263         DataType::QuantisedAsymm8,
1264         DataType::QuantisedSymm16
1265     };
1266
1267     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1268 }
1269
1270 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1271 {
1272     const std::string descriptorName{"ReshapeQueueDescriptor"};
1273
1274     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1275     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1276
1277     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1278     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1279
1280     ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1281
1282     // Check the supported data types
1283     std::vector<DataType> supportedTypes =
1284     {
1285         DataType::Float32,
1286         DataType::Float16,
1287         DataType::QuantisedAsymm8,
1288         DataType::QuantisedSymm16
1289     };
1290
1291     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1292     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1293 }
1294
1295 void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1296 {
1297     const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1298
1299     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1300     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1301
1302     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1303     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1304
1305     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1306     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1307
1308     if (m_Parameters.m_BlockShape.size() != 2)
1309     {
1310         throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1311     }
1312
1313     if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1314     {
1315         throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1316                                        "dimensions as Block Shape.");
1317     }
1318
1319     const TensorShape& inputShape = inputTensorInfo.GetShape();
1320
1321     std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1322     std::pair<unsigned int, unsigned int> widthPad  = m_Parameters.m_PadList[1];
1323
1324     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1325
1326     const unsigned int inputWidth  = inputShape[dimensionIndices.GetWidthIndex()] +
1327                                      widthPad.first + widthPad.second;
1328     const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1329                                      heightPad.first + heightPad.second;
1330
1331     const unsigned int numInputElements  = inputShape[0] * inputHeight * inputWidth *
1332                                            inputShape[dimensionIndices.GetChannelsIndex()];
1333     const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1334
1335     if (numOutputElements != numInputElements)
1336     {
1337         throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1338             to_string(numInputElements) + " after padding but output tensor has " +
1339             to_string(numOutputElements) + " elements.");
1340     }
1341
1342     if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1343     {
1344         throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1345                                        "divisible by Block Shape in all spatial dimensions");
1346     }
1347
1348     std::vector<DataType> supportedTypes =
1349     {
1350             DataType::Float16,
1351             DataType::Float32,
1352             DataType::QuantisedAsymm8,
1353             DataType::QuantisedSymm16
1354     };
1355
1356     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1357     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1358 }
1359
1360 void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1361 {
1362     const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1363
1364     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1365     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1366
1367     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1368     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1369
1370     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
1371     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1372
1373     std::vector<DataType> supportedTypes =
1374     {
1375         DataType::Float32,
1376         DataType::Float16,
1377         DataType::QuantisedAsymm8,
1378         DataType::QuantisedSymm16
1379     };
1380
1381     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1382     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1383
1384     DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1385     const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1386     const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1387     const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1388
1389     const TensorShape& inputShape = inputTensorInfo.GetShape();
1390
1391     const unsigned int numInputElements  =
1392         inputShape[0] * inputShape[wIndex] * inputShape[hIndex] * inputShape[cIndex];
1393     const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1394
1395     if (numOutputElements != numInputElements)
1396     {
1397         throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1398             std::to_string(numInputElements) + " but output tensor has " +
1399             std::to_string(numOutputElements) + " elements.");
1400     }
1401
1402     if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex]  % m_Parameters.m_BlockSize != 0)
1403     {
1404         throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1405                                        "by block size in all spatial dimensions");
1406     }
1407 }
1408
1409 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1410 {
1411     const std::string descriptorName{"FloorQueueDescriptor"};
1412
1413     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1414     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1415
1416     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1417     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1418
1419     std::vector<DataType> supportedTypes =
1420     {
1421         DataType::Float32,
1422         DataType::QuantisedSymm16
1423     };
1424
1425     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1426
1427     if (inputTensorInfo != outputTensorInfo)
1428     {
1429         throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1430     }
1431 }
1432
1433 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1434 {
1435     // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1436
1437     const std::string descriptorName{"LstmQueueDescriptor"};
1438
1439     // check dimensions of all inputs and outputs
1440     if (workloadInfo.m_InputTensorInfos.size() != 3)
1441     {
1442         throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1443     }
1444     if (workloadInfo.m_OutputTensorInfos.size() != 4)
1445     {
1446         throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1447     }
1448
1449     std::vector<DataType> supportedTypes =
1450     {
1451         DataType::Float16,
1452         DataType::Float32,
1453         DataType::QuantisedSymm16
1454     };
1455
1456     // check for supported type of one input and match them with all the other input and output
1457     ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1458
1459     // type matches all other inputs
1460     for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1461     {
1462         ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1463                                      workloadInfo.m_InputTensorInfos[i],
1464                                      descriptorName,
1465                                      "input_0",
1466                                      "input_" + std::to_string(i));
1467     }
1468     // type matches all other outputs
1469     for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1470     {
1471         ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1472                                      workloadInfo.m_OutputTensorInfos[i],
1473                                      "LstmQueueDescriptor",
1474                                      "input_0",
1475                                      "output_" + std::to_string(i));
1476     }
1477
1478     // TODO: check clipping parameter is valid
1479
1480     // Inferring batch size, number of outputs and number of cells from the inputs.
1481     // TODO: figure out if there is a way to make sure the specific inputs are at that index of workloadInfo
1482     const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1483     const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1484     ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1485     const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1486     ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1487     const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1488
1489     // input tensor
1490     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1491                                 descriptorName + " input_0");
1492     // outputStateInTensor
1493     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1494                                 descriptorName + " input_1");
1495     // outputStateInTensor
1496     ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1497                                 descriptorName + " input_2");
1498     // scratchBufferTensor
1499     unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1500     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1501                                 descriptorName + " output_0");
1502     // outputStateOutTensor
1503     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1504                                 descriptorName + " output_1");
1505     // cellStateOutTensor
1506     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1507                                 descriptorName + " output_2");
1508     // outputTensor
1509     ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1510                                 descriptorName + " output_3");
1511
1512
1513     // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1514     if ( m_InputToInputWeights )
1515     {
1516         ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1517                                       (n_cell * n_input), "InputLayerNormWeights");
1518     }
1519
1520     ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1521     ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1522                                   (n_cell * n_input), "InputToForgetWeights");
1523
1524     ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1525     ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1526                                   (n_cell * n_input), "InputToCellWeights");
1527
1528     if ( m_RecurrentToInputWeights )
1529     {
1530         ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1531                                       (n_cell * n_output), "RecurrentToInputWeights");
1532     }
1533
1534     ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1535     ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1536                                   (n_cell * n_output), "RecurrentToForgetWeights");
1537
1538     ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1539     ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1540                                   (n_cell * n_output), "RecurrentToCellWeights");
1541
1542     // Make sure the input-gate's parameters are either both present (regular
1543     // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1544     bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1545                                      !m_Parameters.m_CifgEnabled) ||
1546                                      (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1547                                      m_Parameters.m_CifgEnabled));
1548     if (!cifg_weights_all_or_none)
1549     {
1550         throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1551                                        "RecurrentToInputWeights must either both be present (regular LSTM) "
1552                                        "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1553                                        "accordingly.");
1554     }
1555
1556     if ( m_CellToInputWeights )
1557     {
1558         ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1559                                       n_cell, "CellToInputWeights");
1560     }
1561     if ( m_CellToForgetWeights )
1562     {
1563         ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1564                                       n_cell, "CellToForgetWeights");
1565     }
1566     if ( m_CellToOutputWeights )
1567     {
1568         ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1569                                       n_cell, "CellToOutputWeights");
1570     }
1571
1572     // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1573     bool peephole_weights_all_or_none =
1574             (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) &&  m_CellToForgetWeights
1575             && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1576             || ( !m_CellToInputWeights && !m_CellToForgetWeights
1577             && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1578     if (!peephole_weights_all_or_none)
1579     {
1580         throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1581     }
1582
1583     // Make sure the input gate bias is present only when not a CIFG-LSTM.
1584     if (m_Parameters.m_CifgEnabled)
1585     {
1586         if (m_InputGateBias)
1587         {
1588             throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1589         }
1590     }
1591     else
1592     {
1593         if (!m_InputGateBias)
1594         {
1595             throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1596                                            "must be present.");
1597         }
1598         ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1599                                       n_cell, "InputGateBias");
1600     }
1601
1602     ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1603     ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1604
1605     ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1606     ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1607
1608     ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1609     ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1610
1611     if (m_ProjectionWeights)
1612     {
1613         ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1614                                       (n_cell * n_output), "ProjectionWeights");
1615     }
1616     if (m_ProjectionBias)
1617     {
1618         ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1619     }
1620
1621     // Making sure the projection tensors are consistent:
1622     // 1) If projection weight is not present, then projection bias should not be
1623     // present.
1624     // 2) If projection weight is present, then projection bias is optional.
1625     bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1626                                         !m_Parameters.m_ProjectionEnabled)
1627                                         || (m_ProjectionWeights && !m_ProjectionBias &&
1628                                         m_Parameters.m_ProjectionEnabled)
1629                                         || (m_ProjectionWeights && m_ProjectionBias &&
1630                                         m_Parameters.m_ProjectionEnabled));
1631     if (!projecton_tensors_consistent)
1632     {
1633         throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1634     }
1635
1636     // The four layer normalization weights either all have values or none of them have values. Additionally, if
1637     // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1638     // either all have values or none of them have values. Layer normalization is used when the values of all the
1639     // layer normalization weights are present
1640     if (m_InputLayerNormWeights)
1641     {
1642         ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1643     }
1644     if (m_ForgetLayerNormWeights)
1645     {
1646         ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1647     }
1648     if (m_CellLayerNormWeights)
1649     {
1650         ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1651     }
1652     if (m_OutputLayerNormWeights)
1653     {
1654         ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1655     }
1656
1657     if (m_Parameters.m_LayerNormEnabled)
1658     {
1659         if (!m_Parameters.m_CifgEnabled)
1660         {
1661             if (!m_InputLayerNormWeights)
1662             {
1663                 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1664                                                "disabled but InputLayerNormWeights are not present");
1665             }
1666             ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1667                                           1, n_cell, "InputLayerNormWeights");
1668         }
1669         else if (m_InputLayerNormWeights)
1670         {
1671             throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1672                                            "enabled");
1673         }
1674
1675         ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1676                         "ForgetLayerNormWeights");
1677         ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1678
1679         ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1680                         "OutputLayerNormWeights");
1681         ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1682
1683         ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1684                         "CellLayerNormWeights");
1685         ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1686     }
1687     else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1688     {
1689         throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
1690                                        "normalisation weights are present.");
1691     }
1692 }
1693
1694 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1695 {
1696     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
1697
1698     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1699     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1700
1701     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1702     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1703
1704     if (inputTensorInfo.GetDataType() != DataType::Float32)
1705     {
1706         throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
1707     }
1708
1709     if (outputTensorInfo.GetDataType() != DataType::Float16)
1710     {
1711         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
1712     }
1713
1714     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1715 }
1716
1717 void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1718 {
1719     const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
1720
1721     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1722     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1723
1724     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1725     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1726
1727     if (inputTensorInfo.GetDataType() != DataType::Float16)
1728     {
1729         throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
1730     }
1731
1732     if (outputTensorInfo.GetDataType() != DataType::Float32)
1733     {
1734         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
1735     }
1736
1737     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1738 }
1739
1740 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1741 {
1742     const std::string descriptorName{"DivisionQueueDescriptor"};
1743
1744     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1745     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1746
1747     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1748     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1749     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1750
1751     std::vector<DataType> supportedTypes =
1752     {
1753         DataType::Float32,
1754         DataType::QuantisedAsymm8,
1755         DataType::QuantisedSymm16,
1756         DataType::Float16
1757     };
1758
1759     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1760     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1761     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1762
1763     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1764                                        inputTensorInfo1,
1765                                        outputTensorInfo,
1766                                        descriptorName,
1767                                        "input_0",
1768                                        "input_1");
1769 }
1770
1771 void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1772 {
1773     const std::string descriptorName{"SubtractionQueueDescriptor"};
1774
1775     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1776     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1777
1778     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1779     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1780     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1781
1782     std::vector<DataType> supportedTypes =
1783     {
1784         DataType::Float32,
1785         DataType::QuantisedAsymm8,
1786         DataType::QuantisedSymm16,
1787         DataType::Float16
1788     };
1789
1790     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1791     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1792     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1793
1794     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1795                                        inputTensorInfo1,
1796                                        outputTensorInfo,
1797                                        descriptorName,
1798                                        "input_0",
1799                                        "input_1");
1800 }
1801
1802 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1803 {
1804     const std::string descriptorName{"MaximumQueueDescriptor"};
1805
1806     ValidateNumInputs(workloadInfo,  descriptorName, 2);
1807     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1808
1809     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1810     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1811     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1812
1813     std::vector<DataType> supportedTypes =
1814     {
1815         DataType::Float16,
1816         DataType::Float32,
1817         DataType::Signed32,
1818         DataType::QuantisedAsymm8,
1819         DataType::QuantisedSymm16
1820     };
1821
1822     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1823     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1824     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1825
1826     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1827                                        inputTensorInfo1,
1828                                        outputTensorInfo,
1829                                        descriptorName,
1830                                        "input_0",
1831                                        "input_1");
1832 }
1833
1834 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1835 {
1836     const std::string descriptorName{"MeanQueueDescriptor"};
1837
1838     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1839     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1840
1841     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1842     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1843
1844     std::vector<DataType> supportedTypes =
1845     {
1846         DataType::Float32,
1847         DataType::Float16,
1848         DataType::QuantisedAsymm8,
1849         DataType::QuantisedSymm16
1850     };
1851
1852     // First check if input tensor data type is supported, then
1853     // check if this data type matches the output tensor data type
1854     ValidateDataTypes(inputTensorInfo,  supportedTypes, descriptorName);
1855     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1856
1857     if (m_Parameters.m_KeepDims)
1858     {
1859         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1860     }
1861     else if (m_Parameters.m_Axis.empty())
1862     {
1863         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
1864     }
1865     else
1866     {
1867         unsigned int outputDim =
1868             inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
1869         ValidateTensorNumDimensions(outputTensorInfo,
1870                                     descriptorName,
1871                                     outputDim > 0 ? outputDim : 1,
1872                                     "output");
1873     }
1874 }
1875
1876 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1877 {
1878     const std::string descriptorName{"PadQueueDescriptor"};
1879
1880     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1881     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1882
1883     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1884     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1885
1886     // input and output should have the same number of dimensions
1887     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1888
1889     // there should be entry in the pad list for each dimension in the input tensor
1890     if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
1891         throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
1892                                        "as there are dimensions in the input tensor that is " +
1893                                        std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
1894                                        " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
1895     }
1896 }
1897
1898 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1899 {
1900     const std::string descriptorName{"QuantizeQueueDescriptor"};
1901
1902     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1903     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1904
1905     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1906     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1907
1908     std::vector<DataType> supportedTypes =
1909     {
1910             DataType::Float32,
1911             DataType::Float16
1912     };
1913
1914     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1915
1916     if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
1917         outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
1918     {
1919         throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
1920     }
1921 }
1922
1923 void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1924 {
1925     const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
1926
1927     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1928     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1929
1930     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1931     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1932
1933     std::vector<DataType> supportedTypes =
1934     {
1935             DataType::Float32,
1936             DataType::Float16,
1937             DataType::QuantisedAsymm8,
1938             DataType::QuantisedSymm16
1939     };
1940
1941     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1942     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1943 }
1944
1945 void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1946 {
1947     const std::string descriptorName{"StridedSliceQueueDescriptor"};
1948
1949     ValidateNumInputs(workloadInfo,  descriptorName, 1);
1950     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1951
1952     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
1953     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1954
1955     std::vector<DataType> supportedTypes =
1956     {
1957         DataType::Float16,
1958         DataType::Float32,
1959         DataType::QuantisedAsymm8,
1960         DataType::QuantisedSymm16
1961     };
1962
1963     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1964     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1965
1966     ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1967
1968     const uint32_t rank = inputTensorInfo.GetNumDimensions();
1969     if (rank > 4)
1970     {
1971         throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1972     }
1973
1974     // Begin, End & Stride length must be of rank(input0)
1975     if (m_Parameters.m_Begin.size() != rank)
1976     {
1977         throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
1978     }
1979
1980     if (m_Parameters.m_End.size() != rank)
1981     {
1982         throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
1983     }
1984
1985     if (m_Parameters.m_Stride.size() != rank)
1986     {
1987         throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
1988     }
1989
1990     // Stride entries must be non-zero
1991     for (auto& stride : m_Parameters.m_Stride)
1992     {
1993         if (stride == 0)
1994         {
1995             throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
1996         }
1997     }
1998 }
1999
2000 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2001 {
2002     const std::string descriptorName{"MinimumQueueDescriptor"};
2003
2004     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2005     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2006
2007     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2008     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2009     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2010
2011     std::vector<DataType> supportedTypes =
2012     {
2013         DataType::Float16,
2014         DataType::Float32,
2015         DataType::Signed32,
2016         DataType::QuantisedAsymm8,
2017         DataType::QuantisedSymm16
2018     };
2019
2020     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2021     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2022     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2023
2024     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2025                                        inputTensorInfo1,
2026                                        outputTensorInfo,
2027                                        descriptorName,
2028                                        "input_0",
2029                                        "input_1");
2030 }
2031
2032 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2033 {
2034     const std::string descriptorName{"DebugQueueDescriptor"};
2035
2036     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2037     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2038 }
2039
2040 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2041 {
2042     const std::string descriptorName{"EqualQueueDescriptor"};
2043
2044     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2045     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2046
2047     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2048     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2049     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2050
2051     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2052                                        inputTensorInfo1,
2053                                        outputTensorInfo,
2054                                        descriptorName,
2055                                        "input_0",
2056                                        "input_1");
2057
2058     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2059     {
2060         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2061     }
2062 }
2063
2064 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2065 {
2066     const std::string descriptorName{"GreaterQueueDescriptor"};
2067
2068     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2069     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2070
2071     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2072     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2073     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2074
2075     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2076                                        inputTensorInfo1,
2077                                        outputTensorInfo,
2078                                        descriptorName,
2079                                        "input_0",
2080                                        "input_1");
2081
2082     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2083     {
2084         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2085     }
2086 }
2087
2088 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2089 {
2090     const std::string descriptorName{"RsqrtQueueDescriptor"};
2091
2092     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2093     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2094
2095     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2096     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2097
2098     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2099
2100     std::vector<DataType> supportedTypes =
2101     {
2102             DataType::Float16,
2103             DataType::Float32,
2104             DataType::QuantisedAsymm8,
2105             DataType::QuantisedSymm16
2106     };
2107
2108     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2109     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2110 }
2111
2112 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2113 {
2114     const std::string descriptorName{"GatherQueueDescriptor"};
2115
2116     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2117     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2118
2119     const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2120     if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2121     {
2122         throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2123     }
2124
2125     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2126     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2127
2128     std::vector<DataType> supportedTypes =
2129     {
2130             DataType::Float16,
2131             DataType::Float32,
2132             DataType::QuantisedAsymm8,
2133             DataType::QuantisedSymm16
2134     };
2135
2136     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2137
2138     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2139
2140     unsigned int outputDim  = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2141     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2142 }
2143
2144 void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2145 {
2146     const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2147
2148     ValidateNumInputs(workloadInfo, descriptorName, 2);
2149
2150     if (workloadInfo.m_OutputTensorInfos.size() != 4)
2151     {
2152         throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2153                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2154     }
2155
2156     if (m_Anchors == nullptr)
2157     {
2158         throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2159     }
2160
2161     const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
2162     const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
2163     const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();
2164
2165     const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
2166     const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2167     const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
2168     const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];
2169
2170     ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2171     ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2172     ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2173
2174     const std::vector<DataType> supportedInputTypes =
2175     {
2176         DataType::Float32,
2177         DataType::QuantisedAsymm8,
2178         DataType::QuantisedSymm16
2179     };
2180
2181     ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2182     ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2183     ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2184
2185     ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2186     ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2187     ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2188     ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2189
2190     // NOTE: Output is always Float32 regardless of input type
2191     ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2192     ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2193     ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2194     ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2195
2196     if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2197     {
2198         throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2199                                        "must be positive and less than or equal to 1.");
2200     }
2201
2202     if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2203     {
2204         throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2205                                        "should be equal to number of classes + 1.");
2206     }
2207 }
2208
2209 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2210 {
2211     const std::string& descriptorName{"DequantizeQueueDescriptor"};
2212
2213     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2214     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2215
2216     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2217     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2218
2219     if (inputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
2220         inputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
2221     {
2222         throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2223     }
2224
2225     std::vector<DataType> supportedTypes =
2226     {
2227             DataType::Float32,
2228             DataType::Float16
2229     };
2230
2231     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2232 }
2233
2234 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2235 {
2236     const std::string& descriptorName{"MergeQueueDescriptor"};
2237
2238     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2239     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2240
2241     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2242     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2243     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2244
2245     ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2246     ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2247
2248     ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2249     ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2250 }
2251
2252 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2253 {
2254     const std::string& descriptorName{"SwitchQueueDescriptor"};
2255
2256     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2257     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2258
2259     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2260     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2261
2262     const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2263     const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2264
2265     std::vector<DataType> supportedTypes =
2266     {
2267         DataType::Float32,
2268         DataType::QuantisedAsymm8,
2269         DataType::QuantisedSymm16
2270     };
2271
2272     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2273     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2274
2275     ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2276     ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2277
2278     ValidateTensorShapesMatch(inputTensorInfo0,
2279                               outputTensorInfo0,
2280                               descriptorName,
2281                               "input_0",
2282                               "output_0");
2283
2284     ValidateTensorShapesMatch(inputTensorInfo0,
2285                               outputTensorInfo1,
2286                               descriptorName,
2287                               "input_0",
2288                               "output_1");
2289 }
2290
2291 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2292 {
2293     // This is internally generated so it should not need validation.
2294 }
2295
2296 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2297 {
2298     const std::string& descriptorName{"PreluQueueDescriptor"};
2299
2300     ValidateNumInputs(workloadInfo,  descriptorName, 2);
2301     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2302
2303     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2304     const TensorInfo& alphaTensorInfo  = workloadInfo.m_InputTensorInfos[1];
2305     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2306
2307     std::vector<DataType> supportedTypes
2308     {
2309         DataType::Float16,
2310         DataType::Float32,
2311         DataType::QuantisedAsymm8,
2312         DataType::QuantisedSymm16
2313     };
2314
2315     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2316     ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2317
2318     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2319
2320     ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo,  descriptorName, "input", "alpha");
2321     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2322
2323     ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2324                                        alphaTensorInfo,
2325                                        outputTensorInfo,
2326                                        descriptorName,
2327                                        "input",
2328                                        "alpha");
2329 }
2330
2331 void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2332 {
2333     const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2334
2335     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2336     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2337
2338     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2339     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2340
2341     ValidateTensorNumDimensions(inputTensorInfo,  descriptorName, 4, "input");
2342     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2343
2344     ValidatePointer(m_Weight, descriptorName, "weight");
2345
2346     const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2347     ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2348     ValidateTensorDataType(weightTensorInfo, inputTensorInfo.GetDataType(), descriptorName, "weight");
2349
2350     if (m_Parameters.m_BiasEnabled)
2351     {
2352         ValidatePointer(m_Bias, descriptorName, "bias");
2353
2354         const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
2355         ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
2356
2357         ValidateTensorDataType(biasTensorInfo,
2358                                GetBiasDataType(inputTensorInfo.GetDataType()),
2359                                descriptorName,
2360                                "bias");
2361
2362         ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2363     }
2364 }
2365
2366 void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2367 {
2368     const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2369
2370     // Validate number of inputs/outputs
2371     ValidateNumInputs(workloadInfo,  descriptorName, 3);
2372     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2373
2374     // Input/output tensor infos
2375     auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2376     auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2377     auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2378
2379     auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2380     auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2381
2382     std::vector<DataType> inputOutputSupportedTypes =
2383     {
2384         DataType::QuantisedAsymm8
2385     };
2386
2387     std::vector<DataType> cellStateSupportedTypes =
2388     {
2389         DataType::QuantisedSymm16
2390     };
2391
2392     std::vector<DataType> weightsSupportedTypes =
2393     {
2394         DataType::QuantisedAsymm8
2395     };
2396
2397     std::vector<DataType> biasSupportedTypes =
2398     {
2399         DataType::Signed32
2400     };
2401
2402     // Validate types of input/output tensors
2403     ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2404     ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2405     ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2406
2407     ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2408     ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2409
2410     // Validate matching types of input/output tensors
2411     ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2412     ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2413                                  "outputStateIn", "outputStateOut");
2414     ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2415
2416     // Validate matching quantization info for input/output tensors
2417     ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2418     ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2419     ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2420     
2421     // Infer number of batches, input size and output size from tensor dimensions
2422     const uint32_t numBatches = inputInfo.GetShape()[0];
2423     const uint32_t inputSize  = inputInfo.GetShape()[1];
2424     const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2425
2426     // Validate number of dimensions and number of elements for input/output tensors
2427     ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2428     ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2429     ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2430     ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2431     ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2432
2433     // Validate number of dimensions and number of elements for weights tensors
2434     ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2435     auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2436     ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2437
2438     ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2439     auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2440     ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2441
2442     ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2443     auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2444     ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2445
2446     ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2447     auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2448     ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2449
2450     ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2451     auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2452     ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2453
2454     ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2455     auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2456     ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2457                                 " RecurrentToForgetWeights");
2458
2459     ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2460     auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2461     ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2462
2463     ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2464     auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2465     ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2466
2467     // Validate data types for weights tensors (all should match each other)
2468     ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2469
2470     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2471                                  "inputToInputWeights", "inputToForgetWeights");
2472     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2473                                  "inputToInputWeights", "inputToCellWeights");
2474     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2475                                  "inputToInputWeights", "inputToOutputWeights");
2476
2477     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2478                                  "inputToInputWeights", "recurrentToInputWeights");
2479     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2480                                  "inputToInputWeights", "recurrentToForgeteights");
2481     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2482                                  "inputToInputWeights", "recurrentToCellWeights");
2483     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2484                                  "inputToInputWeights", "recurrentToOutputWeights");
2485
2486     // Validate matching quantization info for weight tensors (all should match each other)
2487     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2488                                     descriptorName, "inputToInputWeights", "inputToForgetWeights");
2489     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2490                                     descriptorName, "inputToInputWeights", "inputToCellWeights");
2491     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2492                                     descriptorName, "inputToInputWeights", "inputToOutputWeights");
2493
2494     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2495                                     descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2496     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2497                                     descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2498     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2499                                     descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2500     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2501                                     descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2502
2503     // Validate number of dimensions and number of elements in bias tensors
2504     ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2505     auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2506     ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2507
2508     ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2509     auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2510     ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2511
2512     ValidatePointer(m_CellBias, descriptorName, "CellBias");
2513     auto cellBiasInfo = m_CellBias->GetTensorInfo();
2514     ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2515
2516     ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2517     auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2518     ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2519
2520     // Validate data types for bias tensors (all should match each other)
2521     ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2522
2523     ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2524                                  "inputGateBias", "forgetGateBias");
2525     ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2526                                  "inputGateBias", "cellBias");
2527     ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2528                                  "inputGateBias", "outputGateBias");
2529
2530     // Validate bias tensor quantization info
2531     ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2532     ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2533     ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2534     ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2535 }
2536
2537 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2538 {
2539     const std::string descriptorName{"AbsQueueDescriptor"};
2540
2541     ValidateNumInputs(workloadInfo,  descriptorName, 1);
2542     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2543
2544     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
2545     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2546
2547     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2548
2549     std::vector<DataType> supportedTypes =
2550         {
2551             DataType::Float16,
2552             DataType::Float32,
2553             DataType::QuantisedAsymm8,
2554             DataType::QuantisedSymm16
2555         };
2556
2557     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2558     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2559 }
2560
2561 } // namespace armnn