2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
5 #include "WorkloadData.hpp"
7 #include "CpuTensorHandle.hpp"
9 #include <DataLayoutIndexed.hpp>
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
19 using namespace armnnUtils;
24 //---------------------------------------------------------------
25 DataType GetBiasDataType(DataType inputDataType)
27 switch (inputDataType)
29 case DataType::Float16:
30 return DataType::Float16;
31 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 case DataType::QuantisedSymm16:
36 return DataType::Signed32;
38 BOOST_ASSERT_MSG(false, "Invalid input data type");
39 return DataType::Float32;
//---------------------------------------------------------------
//android ndk does not support std::to_string function.
// Fix: the extracted body was missing the stream insertion and the return
// statement, so the function fell off the end (undefined behavior).
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
56 //---------------------------------------------------------------
57 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
61 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
62 paramName + " parameter must be set.");
66 //---------------------------------------------------------------
67 void ValidateTensorShapesMatch(const TensorInfo& first,
68 const TensorInfo& second,
69 std::string const& descName,
70 std::string const& firstName,
71 std::string const& secondName)
73 if (first.GetShape() != second.GetShape())
75 throw InvalidArgumentException(descName + ": "
76 + firstName + " & " + secondName + " must have identical shapes");
80 //---------------------------------------------------------------
81 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
83 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
85 throw InvalidArgumentException(descName +
86 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
87 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
91 //---------------------------------------------------------------
92 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
94 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
96 throw InvalidArgumentException(descName +
97 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
98 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
102 //---------------------------------------------------------------
103 void ValidateTensorNumDimensions(const TensorInfo& tensor,
104 std::string const& descName,
105 unsigned int numDimensions,
106 std::string const& tensorName)
108 if (tensor.GetNumDimensions() != numDimensions)
110 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111 to_string(tensor.GetNumDimensions()) + " dimensions for " +
112 tensorName + " tensor.");
116 //---------------------------------------------------------------
117 void ValidateTensorNumElements(const TensorInfo& tensor,
118 std::string const& descName,
119 unsigned int numElements,
120 std::string const& tensorName)
122 if (tensor.GetNumElements() != numElements)
124 throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
125 to_string(tensor.GetNumElements()) + " elements for " +
126 tensorName + " tensor.");
130 //---------------------------------------------------------------
131 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
132 unsigned int numDimension,
133 unsigned int numElements,
134 std::string const& tensorName)
136 const std::string functionName{"ValidateTensorNumDimNumElem"};
137 ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
138 ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
141 //---------------------------------------------------------------
142 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
143 const std::string& descName, std::string const& tensorName)
145 if (tensor.GetDataType() != dataType)
147 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
148 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
152 //---------------------------------------------------------------
153 void ValidateTensorQuantizationSpace(const TensorInfo& first,
154 const TensorInfo& second,
155 const std::string& descName,
156 std::string const& firstName,
157 std::string const& secondName)
159 if (!first.IsQuantized() ||
160 !second.IsQuantized())
162 // Not a quantized type, ignore the validation
166 DataType firstDataType = first.GetDataType();
167 DataType secondDataType = second.GetDataType();
169 if (firstDataType != secondDataType)
171 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
172 " must be of the same quantized type, " +
173 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
174 secondName + " is " + GetDataTypeName(secondDataType));
177 if (!first.IsTypeSpaceMatch(second))
179 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
180 " must have the same quantization space, " +
181 firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
182 " and scale " + to_string(first.GetQuantizationScale()) + ", " +
183 secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
184 " and scale " + to_string(second.GetQuantizationScale()));
188 //---------------------------------------------------------------
189 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
190 const TensorInfo& inputTensorInfo,
191 const TensorInfo& weightsTensorInfo,
192 const std::string& descName)
194 if (biasTensor.GetQuantizationOffset() != 0)
196 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
197 to_string(biasTensor.GetQuantizationOffset()));
199 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
200 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
202 // Print the float values with extra precision to see very small differences
203 std::stringstream msg;
204 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
205 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
206 biasTensor.GetQuantizationScale();
207 throw InvalidArgumentException(msg.str());
211 //---------------------------------------------------------------
212 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
213 unsigned int numExpected,
214 const std::string& descName,
215 const std::string& varName)
217 if (vec.empty() && numExpected > 0)
219 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
222 for (unsigned int i = 0; i < numExpected; ++i)
226 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
231 //---------------------------------------------------------------
232 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
233 const TensorInfo& second,
234 const TensorInfo& output,
235 std::string const& descName,
236 std::string const& firstName,
237 std::string const& secondName)
239 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
241 if (first.GetNumDimensions() != second.GetNumDimensions())
243 throw InvalidArgumentException(descName + ": Tensors "
244 + firstName + " & " + secondName
245 + " must have the same number of dimensions in order to be broadcasted");
247 uint32_t numDims = first.GetNumDimensions();
248 std::vector<uint32_t> outputDims(numDims, 0u);
249 for (uint32_t i = 0; i < numDims; i++)
251 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
252 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
253 if (dimsNotEqual && dimsNotOne)
255 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
257 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
259 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
260 if (broadcastShape != output.GetShape())
262 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
263 + firstName + " & " + secondName
264 + " does not match the output shape");
268 //---------------------------------------------------------------
269 void ValidateDataTypes(const TensorInfo& info,
270 const std::vector<armnn::DataType>& supportedTypes,
271 std::string const& descName)
273 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
274 if (iterator == supportedTypes.end())
276 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
280 //---------------------------------------------------------------
281 void ValidateTensorDataTypesMatch(const TensorInfo& first,
282 const TensorInfo& second,
283 std::string const& descName,
284 std::string const& firstName,
285 std::string const& secondName)
287 if (first.GetDataType() != second.GetDataType())
289 throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
290 " must have identical data types.");
294 //---------------------------------------------------------------
295 void ValidateTensorNumElementsMatch(const TensorInfo& first,
296 const TensorInfo& second,
297 std::string const& descName,
298 std::string const& firstName,
299 std::string const& secondName)
301 if (first.GetNumElements() != second.GetNumElements())
303 throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
304 " must have the same number of elements.");
308 } // anonymous namespace
310 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
311 unsigned int numExpectedIn, unsigned int numExpectedOut) const
313 ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
314 ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
317 //---------------------------------------------------------------
318 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
320 const std::string descriptorName{"MemCopyQueueDescriptor"};
322 ValidateNumInputs(workloadInfo, descriptorName, 1);
323 ValidateNumOutputs(workloadInfo, descriptorName , 1);
325 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
326 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
328 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
329 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
331 if (m_Inputs.size() != m_Outputs.size())
333 throw InvalidArgumentException(boost::str(
334 boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
335 descriptorName % m_Inputs.size() % m_Outputs.size()));
338 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
342 throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
343 descriptorName % i));
348 throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
349 descriptorName % i));
354 //---------------------------------------------------------------
355 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
357 ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
358 ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
360 if (workloadInfo.m_InputTensorInfos.size() != 1)
362 throw InvalidArgumentException(boost::str(
363 boost::format("Number of input infos (%1%) is not 1.")
364 % workloadInfo.m_InputTensorInfos.size()));
368 if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
370 throw InvalidArgumentException(boost::str(
371 boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
372 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
375 for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
377 if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
378 workloadInfo.m_OutputTensorInfos[i].GetNumElements())
380 throw InvalidArgumentException(boost::str(
381 boost::format("Number of elements for tensor input and output %1% does not match")
386 if (m_Inputs.size() != 1)
388 throw InvalidArgumentException(boost::str(
389 boost::format("Number of inputs (%1%) is not 1.")
393 if (m_Inputs.size() != m_Outputs.size())
395 throw InvalidArgumentException(boost::str(
396 boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
397 % m_Inputs.size() % m_Outputs.size()));
400 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
404 throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
409 throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
414 //---------------------------------------------------------------
415 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
417 ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
418 ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
420 if (m_Inputs.size() != 1)
422 throw InvalidArgumentException(boost::str(
423 boost::format("Number of inputs (%1%) is not 1.")
427 if (m_Outputs.size() != 0)
429 throw InvalidArgumentException(boost::str(
430 boost::format("Number of outputs (%1%) is not 0.")
431 % m_Inputs.size() % m_Outputs.size()));
436 throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
440 //---------------------------------------------------------------
441 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
443 const std::string descriptorName{"ActivationQueueDescriptor"};
445 ValidateNumInputs(workloadInfo, descriptorName, 1);
446 ValidateNumOutputs(workloadInfo, descriptorName, 1);
448 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
449 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
451 std::vector<DataType> supportedTypes =
455 DataType::QuantisedAsymm8,
456 DataType::QuantisedSymm16
459 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
460 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
461 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
464 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
466 const std::string descriptorName{"SoftmaxQueueDescriptor"};
468 ValidateNumInputs(workloadInfo, descriptorName, 1);
469 ValidateNumOutputs(workloadInfo, descriptorName, 1);
471 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
472 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
474 std::vector<DataType> supportedTypes =
478 DataType::QuantisedAsymm8,
479 DataType::QuantisedSymm16
482 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
483 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
484 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
487 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
489 const std::string descriptorName{"SplitterQueueDescriptor"};
491 ValidateNumInputs(workloadInfo, descriptorName, 1);
493 // Check the supported data types
494 std::vector<DataType> supportedTypes =
500 DataType::QuantisedAsymm8,
501 DataType::QuantisedSymm16
504 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
505 for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
507 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
508 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
510 const std::string outputName = "output_" + std::to_string(i);
511 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
514 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
516 throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
519 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
521 throw InvalidArgumentException(
522 descriptorName + ": Number of split windows "
523 "has to match number of workloadInfo.m_OutputTensorInfos. "
524 "Number of windows: " +
525 to_string(m_ViewOrigins.size()) +
526 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
529 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
530 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
531 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
533 //Checks that the dimensionality of input is same as the split windows.
534 ViewOrigin const& e = m_ViewOrigins[w];
535 if (e.m_Origin.size() != inputDims)
537 throw InvalidArgumentException(descriptorName + ": Window origin have to "
538 "have the same dimensionality as the input tensor. "
539 "Window origin (index: " +
540 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
541 " dimensions, the input "
543 to_string(inputDims) + " dimensions.");
545 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
547 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
548 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
550 throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
551 "be smaller or equal than the size of the input in that coord.");
557 void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
559 const std::string descriptorName{"ConcatQueueDescriptor"};
561 ValidateNumOutputs(workloadInfo, descriptorName, 1);
563 if (m_Inputs.size() <= 0)
565 throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
567 if (m_Outputs.size() <= 0)
569 throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
572 if (workloadInfo.m_InputTensorInfos.size() <= 0)
574 throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
576 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
578 throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
581 if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
583 throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
586 if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
591 if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
593 throw InvalidArgumentException(
594 descriptorName + ": Number of split windows "
595 "has to match number of workloadInfo.m_InputTensorInfos. "
596 "Number of windows: " +
597 to_string(m_ViewOrigins.size()) +
598 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
601 //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
602 std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
603 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
605 //Checks that the dimensionality of output is same as the split windows.
606 ViewOrigin const& e = m_ViewOrigins[w];
607 if (e.m_Origin.size() != outputDims)
609 throw InvalidArgumentException(descriptorName + ": Window origin have to "
610 "have the same dimensionality as the output tensor. "
611 "Window origin (index: " +
612 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
613 " dimensions, the output "
615 to_string(outputDims) + " dimensions.");
617 //Checks that the merge windows are within the output tensor.
618 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
620 if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
621 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
623 throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
624 "be smaller or equal than the size of the output in that coord.");
629 // Check the supported data types
630 std::vector<DataType> supportedTypes =
636 DataType::QuantisedAsymm8,
637 DataType::QuantisedSymm16
640 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
641 for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
643 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
644 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
646 const std::string inputName = "input_" + std::to_string(i);
647 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
651 void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
653 const std::string descriptorName{"StackQueueDescriptor"};
655 ValidateNumOutputs(workloadInfo, descriptorName, 1);
657 if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
659 throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
662 // All inputs must have the same shape, which is defined in parameters
663 const TensorShape& inputShape = m_Parameters.m_InputShape;
664 for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
666 if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
668 throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
672 if (inputShape.GetNumDimensions() > 4)
674 throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
677 // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
678 // since the output tensor has an additional dimension.
679 if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
681 throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
682 "than the number of input dimensions.");
685 // Output shape must be as inferred from the input shape
686 const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
687 for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
689 if (outputShape[i] != inputShape[i])
691 throw InvalidArgumentException(descriptorName + ": Output tensor must "
692 "match shape inferred from input tensor.");
696 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
698 throw InvalidArgumentException(descriptorName + ": Output tensor must "
699 "match shape inferred from input tensor.");
702 for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
704 if (outputShape[i] != inputShape[i-1])
706 throw InvalidArgumentException(descriptorName + ": Output tensor must "
707 "match shape inferred from input tensor.");
711 if (outputShape.GetNumDimensions() > 5)
713 throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
716 // Check the supported data types
717 std::vector<DataType> supportedTypes =
723 DataType::QuantisedAsymm8,
724 DataType::QuantisedSymm16
727 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
729 for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
731 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
732 workloadInfo.m_InputTensorInfos[i],
735 "input_" + std::to_string(i));
738 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
739 workloadInfo.m_OutputTensorInfos[0],
745 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
747 const std::string descriptorName{"FullyConnectedQueueDescriptor"};
749 ValidateNumInputs(workloadInfo, descriptorName, 1);
750 ValidateNumOutputs(workloadInfo, descriptorName, 1);
752 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
753 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
755 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
757 if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
759 throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
762 ValidatePointer(m_Weight, descriptorName, "weight");
764 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
765 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
767 if (m_Parameters.m_BiasEnabled)
769 ValidatePointer(m_Bias, descriptorName, "bias");
771 // Validates type and quantization values.
772 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
773 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
775 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
776 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
779 // Check the supported data types
780 std::vector<DataType> supportedTypes =
784 DataType::QuantisedAsymm8,
785 DataType::QuantisedSymm16
788 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
789 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
792 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
794 const std::string descriptorName{"NormalizationQueueDescriptor"};
796 ValidateNumInputs(workloadInfo, descriptorName, 1);
797 ValidateNumOutputs(workloadInfo, descriptorName, 1);
799 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
800 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
802 // Check the supported data types
803 std::vector<DataType> supportedTypes =
807 DataType::QuantisedAsymm8,
808 DataType::QuantisedSymm16
811 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
813 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
815 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
818 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
820 const std::string descriptorName{"AdditionQueueDescriptor"};
822 ValidateNumInputs(workloadInfo, descriptorName, 2);
823 ValidateNumOutputs(workloadInfo, descriptorName, 1);
825 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
826 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
827 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
829 std::vector<DataType> supportedTypes =
832 DataType::QuantisedAsymm8,
833 DataType::QuantisedSymm16,
837 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
838 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
839 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
841 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
842 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
844 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
852 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
854 const std::string descriptorName{"MultiplicationQueueDescriptor"};
856 ValidateNumInputs(workloadInfo, descriptorName, 2);
857 ValidateNumOutputs(workloadInfo, descriptorName, 1);
859 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
860 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
861 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
863 std::vector<DataType> supportedTypes =
866 DataType::QuantisedAsymm8,
867 DataType::QuantisedSymm16,
871 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
872 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
873 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
875 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
876 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
878 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
886 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
888 const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
890 ValidateNumInputs(workloadInfo, descriptorName, 1);
891 ValidateNumOutputs(workloadInfo, descriptorName, 1);
893 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
894 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
896 std::vector<DataType> supportedTypes =
900 DataType::QuantisedAsymm8,
901 DataType::QuantisedSymm16
904 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
905 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
907 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
908 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
909 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
911 ValidatePointer(m_Mean, descriptorName, "mean");
912 ValidatePointer(m_Variance, descriptorName, "variance");
913 ValidatePointer(m_Beta, descriptorName, "beta");
914 ValidatePointer(m_Gamma, descriptorName, "gamma");
916 const TensorInfo& mean = m_Mean->GetTensorInfo();
917 const TensorInfo& variance = m_Variance->GetTensorInfo();
918 const TensorInfo& beta = m_Beta->GetTensorInfo();
919 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
921 ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
922 ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
923 ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
924 ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
926 ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
927 ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
928 ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
931 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
933 const std::string descriptorName{"Convolution2dQueueDescriptor"};
935 ValidateNumInputs(workloadInfo, descriptorName, 1);
936 ValidateNumOutputs(workloadInfo, descriptorName, 1);
938 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
939 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
941 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
942 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
944 ValidatePointer(m_Weight, descriptorName, "weight");
946 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
947 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
949 ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
951 if (m_Parameters.m_BiasEnabled)
953 ValidatePointer(m_Bias, descriptorName, "bias");
955 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
956 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
958 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
959 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
962 std::vector<DataType> supportedTypes =
965 DataType::QuantisedAsymm8,
966 DataType::QuantisedSymm16,
970 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
971 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
974 void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
976 const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
978 ValidateNumInputs(workloadInfo, descriptorName, 1);
979 ValidateNumOutputs(workloadInfo, descriptorName, 1);
981 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
982 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
984 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
985 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
987 ValidatePointer(m_Weight, descriptorName, "weight");
989 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
990 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
992 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
994 throw InvalidArgumentException(
995 boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
996 "cannot be smaller than 1.") % descriptorName %
997 m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1000 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1002 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1003 // inputChannels * channelMultiplier should be equal to outputChannels.
1004 const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1005 const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1006 const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1007 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1009 throw InvalidArgumentException(
1010 boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1011 "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1012 "(provided %4%).") % descriptorName % numWeightOutputChannels %
1013 numWeightInputChannels % numWeightChannelMultiplier));
1016 ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
1018 if (m_Parameters.m_BiasEnabled)
1020 ValidatePointer(m_Bias, descriptorName, "bias");
1022 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1023 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1025 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1026 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1029 std::vector<DataType> supportedTypes =
1032 DataType::QuantisedAsymm8,
1033 DataType::QuantisedSymm16,
1037 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1038 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1041 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1043 const std::string descriptorName{"PermuteQueueDescriptor"};
1045 ValidateNumInputs(workloadInfo, descriptorName, 1);
1046 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1048 const PermutationVector& mapping = m_Parameters.m_DimMappings;
1050 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1051 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1053 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1054 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1056 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1058 if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1060 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1061 " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1062 "must match dst dimension " + to_string(mapping[i]) +
1063 " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1067 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1070 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1072 const std::string descriptorName{"Pooling2dQueueDescriptor"};
1074 ValidateNumInputs(workloadInfo, descriptorName, 1);
1075 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1077 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1078 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1080 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1081 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1083 std::vector<DataType> supportedTypes =
1087 DataType::QuantisedAsymm8,
1088 DataType::QuantisedSymm16
1091 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1092 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1095 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1097 const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1099 ValidateNumInputs(workloadInfo, descriptorName, 1);
1100 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1102 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1103 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1105 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1106 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1108 std::vector<DataType> supportedTypes =
1112 DataType::QuantisedAsymm8,
1113 DataType::QuantisedSymm16
1116 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1117 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1119 // ResizeBilinear only changes width and height: batch and channel count must match.
1120 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1121 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1122 if (inputBatchSize != outputBatchSize)
1124 throw InvalidArgumentException(
1125 boost::str(boost::format("%1%: Input batch size (%2%) "
1126 "does not match output batch size (%3%)") %
1127 descriptorName % inputBatchSize % outputBatchSize));
1130 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1131 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1132 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1133 if (inputChannelCount != outputChannelCount)
1135 throw InvalidArgumentException(
1136 boost::str(boost::format("%1%: Input channel count (%2%) "
1137 "does not match output channel count (%3%)") %
1138 descriptorName % inputChannelCount % outputChannelCount));
1142 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1144 const std::string descriptorName{"ResizeQueueDescriptor"};
1146 ValidateNumInputs(workloadInfo, descriptorName, 1);
1147 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1149 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1150 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1152 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1153 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1155 std::vector<DataType> supportedTypes =
1159 DataType::QuantisedAsymm8,
1160 DataType::QuantisedSymm16
1163 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1164 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1166 // Resize only changes width and height: batch and channel count must match.
1167 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1168 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1169 if (inputBatchSize != outputBatchSize)
1171 throw InvalidArgumentException(
1172 boost::str(boost::format("%1%: Input batch size (%2%) "
1173 "does not match output batch size (%3%)") %
1174 descriptorName % inputBatchSize % outputBatchSize));
1177 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1178 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1179 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1180 if (inputChannelCount != outputChannelCount)
1182 throw InvalidArgumentException(
1183 boost::str(boost::format("%1%: Input channel count (%2%) "
1184 "does not match output channel count (%3%)") %
1185 descriptorName % inputChannelCount % outputChannelCount));
1189 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1191 const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1193 ValidateNumInputs(workloadInfo, descriptorName, 1);
1194 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1196 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1197 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1199 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1200 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1202 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1204 if (m_Parameters.m_Min > m_Parameters.m_Max)
1206 throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1210 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1212 const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1214 ValidateNumInputs(workloadInfo, descriptorName, 1);
1215 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1217 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1218 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1220 if (inputTensorInfo.GetNumDimensions() > 4)
1222 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1225 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1227 // Check the supported data types
1228 std::vector<DataType> supportedTypes =
1232 DataType::QuantisedAsymm8,
1233 DataType::QuantisedSymm16
1236 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1237 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1239 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1242 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1244 const std::string descriptorName{"ConstantQueueDescriptor"};
1246 ValidateNumInputs(workloadInfo, descriptorName, 0);
1247 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1251 throw InvalidArgumentException(descriptorName + ": No const input specified.");
1254 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1255 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1257 // Check the supported data types
1258 std::vector<DataType> supportedTypes =
1263 DataType::QuantisedAsymm8,
1264 DataType::QuantisedSymm16
1267 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1270 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1272 const std::string descriptorName{"ReshapeQueueDescriptor"};
1274 ValidateNumInputs(workloadInfo, descriptorName, 1);
1275 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1277 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1278 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1280 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1282 // Check the supported data types
1283 std::vector<DataType> supportedTypes =
1287 DataType::QuantisedAsymm8,
1288 DataType::QuantisedSymm16
1291 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1292 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1295 void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1297 const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1299 ValidateNumInputs(workloadInfo, descriptorName, 1);
1300 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1302 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1303 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1305 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1306 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1308 if (m_Parameters.m_BlockShape.size() != 2)
1310 throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1313 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1315 throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1316 "dimensions as Block Shape.");
1319 const TensorShape& inputShape = inputTensorInfo.GetShape();
1321 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1322 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1324 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1326 const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1327 widthPad.first + widthPad.second;
1328 const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1329 heightPad.first + heightPad.second;
1331 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1332 inputShape[dimensionIndices.GetChannelsIndex()];
1333 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1335 if (numOutputElements != numInputElements)
1337 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1338 to_string(numInputElements) + " after padding but output tensor has " +
1339 to_string(numOutputElements) + " elements.");
1342 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1344 throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1345 "divisible by Block Shape in all spatial dimensions");
1348 std::vector<DataType> supportedTypes =
1352 DataType::QuantisedAsymm8,
1353 DataType::QuantisedSymm16
1356 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1357 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1360 void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1362 const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1364 ValidateNumInputs(workloadInfo, descriptorName, 1);
1365 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1367 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1368 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1370 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1371 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1373 std::vector<DataType> supportedTypes =
1377 DataType::QuantisedAsymm8,
1378 DataType::QuantisedSymm16
1381 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1382 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1384 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1385 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1386 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1387 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1389 const TensorShape& inputShape = inputTensorInfo.GetShape();
1391 const unsigned int numInputElements =
1392 inputShape[0] * inputShape[wIndex] * inputShape[hIndex] * inputShape[cIndex];
1393 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1395 if (numOutputElements != numInputElements)
1397 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1398 std::to_string(numInputElements) + " but output tensor has " +
1399 std::to_string(numOutputElements) + " elements.");
1402 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1404 throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1405 "by block size in all spatial dimensions");
1409 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1411 const std::string descriptorName{"FloorQueueDescriptor"};
1413 ValidateNumInputs(workloadInfo, descriptorName, 1);
1414 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1416 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1417 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1419 std::vector<DataType> supportedTypes =
1422 DataType::QuantisedSymm16
1425 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1427 if (inputTensorInfo != outputTensorInfo)
1429 throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1433 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1435 // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1437 const std::string descriptorName{"LstmQueueDescriptor"};
1439 // check dimensions of all inputs and outputs
1440 if (workloadInfo.m_InputTensorInfos.size() != 3)
1442 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1444 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1446 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1449 std::vector<DataType> supportedTypes =
1453 DataType::QuantisedSymm16
1456 // check for supported type of one input and match them with all the other input and output
1457 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1459 // type matches all other inputs
1460 for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1462 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1463 workloadInfo.m_InputTensorInfos[i],
1466 "input_" + std::to_string(i));
1468 // type matches all other outputs
1469 for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1471 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1472 workloadInfo.m_OutputTensorInfos[i],
1473 "LstmQueueDescriptor",
1475 "output_" + std::to_string(i));
1478 // TODO: check clipping parameter is valid
1480 // Inferring batch size, number of outputs and number of cells from the inputs.
1481 // TODO: figure out if there is a way to make sure the specific inputs are at that index of workloadInfo
1482 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1483 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1484 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1485 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1486 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1487 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1490 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1491 descriptorName + " input_0");
1492 // outputStateInTensor
1493 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1494 descriptorName + " input_1");
1495 // outputStateInTensor
1496 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1497 descriptorName + " input_2");
1498 // scratchBufferTensor
1499 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1500 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1501 descriptorName + " output_0");
1502 // outputStateOutTensor
1503 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1504 descriptorName + " output_1");
1505 // cellStateOutTensor
1506 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1507 descriptorName + " output_2");
1509 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1510 descriptorName + " output_3");
1513 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1514 if ( m_InputToInputWeights )
1516 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1517 (n_cell * n_input), "InputLayerNormWeights");
1520 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1521 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1522 (n_cell * n_input), "InputToForgetWeights");
1524 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1525 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1526 (n_cell * n_input), "InputToCellWeights");
1528 if ( m_RecurrentToInputWeights )
1530 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1531 (n_cell * n_output), "RecurrentToInputWeights");
1534 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1535 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1536 (n_cell * n_output), "RecurrentToForgetWeights");
1538 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1539 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1540 (n_cell * n_output), "RecurrentToCellWeights");
1542 // Make sure the input-gate's parameters are either both present (regular
1543 // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1544 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1545 !m_Parameters.m_CifgEnabled) ||
1546 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1547 m_Parameters.m_CifgEnabled));
1548 if (!cifg_weights_all_or_none)
1550 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1551 "RecurrentToInputWeights must either both be present (regular LSTM) "
1552 "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1556 if ( m_CellToInputWeights )
1558 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1559 n_cell, "CellToInputWeights");
1561 if ( m_CellToForgetWeights )
1563 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1564 n_cell, "CellToForgetWeights");
1566 if ( m_CellToOutputWeights )
1568 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1569 n_cell, "CellToOutputWeights");
1572 // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1573 bool peephole_weights_all_or_none =
1574 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1575 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1576 || ( !m_CellToInputWeights && !m_CellToForgetWeights
1577 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1578 if (!peephole_weights_all_or_none)
1580 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1583 // Make sure the input gate bias is present only when not a CIFG-LSTM.
1584 if (m_Parameters.m_CifgEnabled)
1586 if (m_InputGateBias)
1588 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1593 if (!m_InputGateBias)
1595 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1596 "must be present.");
1598 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1599 n_cell, "InputGateBias");
1602 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1603 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1605 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1606 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1608 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1609 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1611 if (m_ProjectionWeights)
1613 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1614 (n_cell * n_output), "ProjectionWeights");
1616 if (m_ProjectionBias)
1618 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1621 // Making sure the projection tensors are consistent:
1622 // 1) If projection weight is not present, then projection bias should not be
1624 // 2) If projection weight is present, then projection bias is optional.
1625 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1626 !m_Parameters.m_ProjectionEnabled)
1627 || (m_ProjectionWeights && !m_ProjectionBias &&
1628 m_Parameters.m_ProjectionEnabled)
1629 || (m_ProjectionWeights && m_ProjectionBias &&
1630 m_Parameters.m_ProjectionEnabled));
1631 if (!projecton_tensors_consistent)
1633 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1636 // The four layer normalization weights either all have values or none of them have values. Additionally, if
1637 // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1638 // either all have values or none of them have values. Layer normalization is used when the values of all the
1639 // layer normalization weights are present
1640 if (m_InputLayerNormWeights)
1642 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1644 if (m_ForgetLayerNormWeights)
1646 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1648 if (m_CellLayerNormWeights)
1650 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1652 if (m_OutputLayerNormWeights)
1654 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1657 if (m_Parameters.m_LayerNormEnabled)
1659 if (!m_Parameters.m_CifgEnabled)
1661 if (!m_InputLayerNormWeights)
1663 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1664 "disabled but InputLayerNormWeights are not present");
1666 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1667 1, n_cell, "InputLayerNormWeights");
1669 else if (m_InputLayerNormWeights)
1671 throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1675 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1676 "ForgetLayerNormWeights");
1677 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1679 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1680 "OutputLayerNormWeights");
1681 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1683 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1684 "CellLayerNormWeights");
1685 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1687 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1689 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
1690 "normalisation weights are present.");
1694 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1696 const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
1698 ValidateNumInputs(workloadInfo, descriptorName, 1);
1699 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1701 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1702 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1704 if (inputTensorInfo.GetDataType() != DataType::Float32)
1706 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
1709 if (outputTensorInfo.GetDataType() != DataType::Float16)
1711 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
1714 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1717 void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1719 const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
1721 ValidateNumInputs(workloadInfo, descriptorName, 1);
1722 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1724 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1725 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1727 if (inputTensorInfo.GetDataType() != DataType::Float16)
1729 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
1732 if (outputTensorInfo.GetDataType() != DataType::Float32)
1734 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
1737 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1740 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1742 const std::string descriptorName{"DivisionQueueDescriptor"};
1744 ValidateNumInputs(workloadInfo, descriptorName, 2);
1745 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1747 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1748 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1749 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1751 std::vector<DataType> supportedTypes =
1754 DataType::QuantisedAsymm8,
1755 DataType::QuantisedSymm16,
1759 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1760 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1761 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1763 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1771 void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1773 const std::string descriptorName{"SubtractionQueueDescriptor"};
1775 ValidateNumInputs(workloadInfo, descriptorName, 2);
1776 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1778 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1779 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1780 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1782 std::vector<DataType> supportedTypes =
1785 DataType::QuantisedAsymm8,
1786 DataType::QuantisedSymm16,
1790 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1791 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1792 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1794 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1802 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1804 const std::string descriptorName{"MaximumQueueDescriptor"};
1806 ValidateNumInputs(workloadInfo, descriptorName, 2);
1807 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1809 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1810 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1811 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1813 std::vector<DataType> supportedTypes =
1818 DataType::QuantisedAsymm8,
1819 DataType::QuantisedSymm16
1822 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1823 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1824 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1826 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1834 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1836 const std::string descriptorName{"MeanQueueDescriptor"};
1838 ValidateNumInputs(workloadInfo, descriptorName, 1);
1839 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1841 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1842 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1844 std::vector<DataType> supportedTypes =
1848 DataType::QuantisedAsymm8,
1849 DataType::QuantisedSymm16
1852 // First check if input tensor data type is supported, then
1853 // check if this data type matches the output tensor data type
1854 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1855 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1857 if (m_Parameters.m_KeepDims)
1859 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1861 else if (m_Parameters.m_Axis.empty())
1863 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
1867 unsigned int outputDim =
1868 inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
1869 ValidateTensorNumDimensions(outputTensorInfo,
1871 outputDim > 0 ? outputDim : 1,
1876 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1878 const std::string descriptorName{"PadQueueDescriptor"};
1880 ValidateNumInputs(workloadInfo, descriptorName, 1);
1881 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1883 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1884 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1886 // input and output should have the same number of dimensions
1887 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1889 // there should be entry in the pad list for each dimension in the input tensor
1890 if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
1891 throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
1892 "as there are dimensions in the input tensor that is " +
1893 std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
1894 " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
1898 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1900 const std::string descriptorName{"QuantizeQueueDescriptor"};
1902 ValidateNumInputs(workloadInfo, descriptorName, 1);
1903 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1905 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1906 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1908 std::vector<DataType> supportedTypes =
1914 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1916 if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
1917 outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
1919 throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
1923 void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1925 const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
1927 ValidateNumInputs(workloadInfo, descriptorName, 1);
1928 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1930 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1931 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1933 std::vector<DataType> supportedTypes =
1937 DataType::QuantisedAsymm8,
1938 DataType::QuantisedSymm16
1941 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1942 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1945 void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1947 const std::string descriptorName{"StridedSliceQueueDescriptor"};
1949 ValidateNumInputs(workloadInfo, descriptorName, 1);
1950 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1952 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1953 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1955 std::vector<DataType> supportedTypes =
1959 DataType::QuantisedAsymm8,
1960 DataType::QuantisedSymm16
1963 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1964 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1966 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1968 const uint32_t rank = inputTensorInfo.GetNumDimensions();
1971 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1974 // Begin, End & Stride length must be of rank(input0)
1975 if (m_Parameters.m_Begin.size() != rank)
1977 throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
1980 if (m_Parameters.m_End.size() != rank)
1982 throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
1985 if (m_Parameters.m_Stride.size() != rank)
1987 throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
1990 // Stride entries must be non-zero
1991 for (auto& stride : m_Parameters.m_Stride)
1995 throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2000 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2002 const std::string descriptorName{"MinimumQueueDescriptor"};
2004 ValidateNumInputs(workloadInfo, descriptorName, 2);
2005 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2007 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2008 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2009 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2011 std::vector<DataType> supportedTypes =
2016 DataType::QuantisedAsymm8,
2017 DataType::QuantisedSymm16
2020 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2021 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2022 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2024 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2032 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2034 const std::string descriptorName{"DebugQueueDescriptor"};
2036 ValidateNumInputs(workloadInfo, descriptorName, 1);
2037 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2040 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2042 const std::string descriptorName{"EqualQueueDescriptor"};
2044 ValidateNumInputs(workloadInfo, descriptorName, 2);
2045 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2047 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2048 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2049 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2051 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2058 if (outputTensorInfo.GetDataType() != DataType::Boolean)
2060 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2064 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2066 const std::string descriptorName{"GreaterQueueDescriptor"};
2068 ValidateNumInputs(workloadInfo, descriptorName, 2);
2069 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2071 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2072 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2073 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2075 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2082 if (outputTensorInfo.GetDataType() != DataType::Boolean)
2084 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2088 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2090 const std::string descriptorName{"RsqrtQueueDescriptor"};
2092 ValidateNumInputs(workloadInfo, descriptorName, 1);
2093 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2095 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2096 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2098 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2100 std::vector<DataType> supportedTypes =
2104 DataType::QuantisedAsymm8,
2105 DataType::QuantisedSymm16
2108 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2109 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2112 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2114 const std::string descriptorName{"GatherQueueDescriptor"};
2116 ValidateNumInputs(workloadInfo, descriptorName, 2);
2117 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2119 const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2120 if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2122 throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2125 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2126 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2128 std::vector<DataType> supportedTypes =
2132 DataType::QuantisedAsymm8,
2133 DataType::QuantisedSymm16
2136 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2138 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2140 unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2141 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2144 void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2146 const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2148 ValidateNumInputs(workloadInfo, descriptorName, 2);
2150 if (workloadInfo.m_OutputTensorInfos.size() != 4)
2152 throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2153 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
2156 if (m_Anchors == nullptr)
2158 throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2161 const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
2162 const TensorInfo& scoresInfo = workloadInfo.m_InputTensorInfos[1];
2163 const TensorInfo& anchorsInfo = m_Anchors->GetTensorInfo();
2165 const TensorInfo& detectionBoxesInfo = workloadInfo.m_OutputTensorInfos[0];
2166 const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2167 const TensorInfo& detectionScoresInfo = workloadInfo.m_OutputTensorInfos[2];
2168 const TensorInfo& numDetectionsInfo = workloadInfo.m_OutputTensorInfos[3];
2170 ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2171 ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2172 ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2174 const std::vector<DataType> supportedInputTypes =
2177 DataType::QuantisedAsymm8,
2178 DataType::QuantisedSymm16
2181 ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2182 ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2183 ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2185 ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2186 ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2187 ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2188 ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2190 // NOTE: Output is always Float32 regardless of input type
2191 ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2192 ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2193 ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2194 ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
2196 if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2198 throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2199 "must be positive and less than or equal to 1.");
2202 if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2204 throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2205 "should be equal to number of classes + 1.");
2209 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2211 const std::string& descriptorName{"DequantizeQueueDescriptor"};
2213 ValidateNumInputs(workloadInfo, descriptorName, 1);
2214 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2216 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2217 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2219 if (inputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
2220 inputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
2222 throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
2225 std::vector<DataType> supportedTypes =
2231 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2234 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2236 const std::string& descriptorName{"MergeQueueDescriptor"};
2238 ValidateNumInputs(workloadInfo, descriptorName, 2);
2239 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2241 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2242 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2243 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2245 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2246 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2248 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2249 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2252 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2254 const std::string& descriptorName{"SwitchQueueDescriptor"};
2256 ValidateNumInputs(workloadInfo, descriptorName, 2);
2257 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2259 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2260 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2262 const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2263 const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2265 std::vector<DataType> supportedTypes =
2268 DataType::QuantisedAsymm8,
2269 DataType::QuantisedSymm16
2272 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2273 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2275 ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2276 ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
2278 ValidateTensorShapesMatch(inputTensorInfo0,
2284 ValidateTensorShapesMatch(inputTensorInfo0,
2291 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2293 // This is internally generated so it should not need validation.
2296 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2298 const std::string& descriptorName{"PreluQueueDescriptor"};
2300 ValidateNumInputs(workloadInfo, descriptorName, 2);
2301 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2303 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2304 const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2305 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2307 std::vector<DataType> supportedTypes
2311 DataType::QuantisedAsymm8,
2312 DataType::QuantisedSymm16
2315 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2316 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2318 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2320 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
2321 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2323 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2331 void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2333 const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2335 ValidateNumInputs(workloadInfo, descriptorName, 1);
2336 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2338 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2339 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2341 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2342 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2344 ValidatePointer(m_Weight, descriptorName, "weight");
2346 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2347 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2348 ValidateTensorDataType(weightTensorInfo, inputTensorInfo.GetDataType(), descriptorName, "weight");
2350 if (m_Parameters.m_BiasEnabled)
2352 ValidatePointer(m_Bias, descriptorName, "bias");
2354 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
2355 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
2357 ValidateTensorDataType(biasTensorInfo,
2358 GetBiasDataType(inputTensorInfo.GetDataType()),
2362 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2366 void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2368 const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2370 // Validate number of inputs/outputs
2371 ValidateNumInputs(workloadInfo, descriptorName, 3);
2372 ValidateNumOutputs(workloadInfo, descriptorName, 2);
2374 // Input/output tensor infos
2375 auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2376 auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2377 auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2379 auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2380 auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2382 std::vector<DataType> inputOutputSupportedTypes =
2384 DataType::QuantisedAsymm8
2387 std::vector<DataType> cellStateSupportedTypes =
2389 DataType::QuantisedSymm16
2392 std::vector<DataType> weightsSupportedTypes =
2394 DataType::QuantisedAsymm8
2397 std::vector<DataType> biasSupportedTypes =
2402 // Validate types of input/output tensors
2403 ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2404 ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2405 ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2407 ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2408 ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2410 // Validate matching types of input/output tensors
2411 ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2412 ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2413 "outputStateIn", "outputStateOut");
2414 ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2416 // Validate matching quantization info for input/output tensors
2417 ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2418 ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2419 ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2421 // Infer number of batches, input size and output size from tensor dimensions
2422 const uint32_t numBatches = inputInfo.GetShape()[0];
2423 const uint32_t inputSize = inputInfo.GetShape()[1];
2424 const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2426 // Validate number of dimensions and number of elements for input/output tensors
2427 ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2428 ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2429 ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2430 ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2431 ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2433 // Validate number of dimensions and number of elements for weights tensors
2434 ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2435 auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2436 ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2438 ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2439 auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2440 ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2442 ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2443 auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2444 ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2446 ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2447 auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2448 ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2450 ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2451 auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2452 ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2454 ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2455 auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2456 ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2457 " RecurrentToForgetWeights");
2459 ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2460 auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2461 ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2463 ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2464 auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
2465 ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2467 // Validate data types for weights tensors (all should match each other)
2468 ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2470 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2471 "inputToInputWeights", "inputToForgetWeights");
2472 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2473 "inputToInputWeights", "inputToCellWeights");
2474 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2475 "inputToInputWeights", "inputToOutputWeights");
2477 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2478 "inputToInputWeights", "recurrentToInputWeights");
2479 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2480 "inputToInputWeights", "recurrentToForgeteights");
2481 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2482 "inputToInputWeights", "recurrentToCellWeights");
2483 ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2484 "inputToInputWeights", "recurrentToOutputWeights");
2486 // Validate matching quantization info for weight tensors (all should match each other)
2487 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2488 descriptorName, "inputToInputWeights", "inputToForgetWeights");
2489 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2490 descriptorName, "inputToInputWeights", "inputToCellWeights");
2491 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2492 descriptorName, "inputToInputWeights", "inputToOutputWeights");
2494 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2495 descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2496 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2497 descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2498 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2499 descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2500 ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2501 descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2503 // Validate number of dimensions and number of elements in bias tensors
2504 ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2505 auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2506 ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2508 ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2509 auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2510 ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2512 ValidatePointer(m_CellBias, descriptorName, "CellBias");
2513 auto cellBiasInfo = m_CellBias->GetTensorInfo();
2514 ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2516 ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2517 auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2518 ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2520 // Validate data types for bias tensors (all should match each other)
2521 ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2523 ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2524 "inputGateBias", "forgetGateBias");
2525 ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2526 "inputGateBias", "cellBias");
2527 ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2528 "inputGateBias", "outputGateBias");
2530 // Validate bias tensor quantization info
2531 ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2532 ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2533 ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2534 ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2537 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2539 const std::string descriptorName{"AbsQueueDescriptor"};
2541 ValidateNumInputs(workloadInfo, descriptorName, 1);
2542 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2544 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2545 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2547 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2549 std::vector<DataType> supportedTypes =
2553 DataType::QuantisedAsymm8,
2554 DataType::QuantisedSymm16
2557 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2558 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2561 } // namespace armnn