2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
5 #include "WorkloadData.hpp"
7 #include "CpuTensorHandle.hpp"
9 #include <DataLayoutIndexed.hpp>
16 #include <boost/format.hpp>
17 #include <boost/numeric/conversion/cast.hpp>
19 using namespace armnnUtils;
24 //---------------------------------------------------------------
25 DataType GetBiasDataType(DataType inputDataType)
27 switch (inputDataType)
29 case DataType::Float16:
30 return DataType::Float16;
31 case DataType::Float32:
32 return DataType::Float32;
33 case DataType::QuantisedAsymm8:
34 return DataType::Signed32;
35 case DataType::QuantisedSymm16:
36 return DataType::Signed32;
38 BOOST_ASSERT_MSG(false, "Invalid input data type");
39 return DataType::Float32;
46 //---------------------------------------------------------------
//android ndk does not support std::to_string function.
// Portable replacement: streams the value through std::ostringstream.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
56 //---------------------------------------------------------------
57 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
61 throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
62 paramName + " parameter must be set.");
66 //---------------------------------------------------------------
67 void ValidateTensorShapesMatch(const TensorInfo& first,
68 const TensorInfo& second,
69 std::string const& descName,
70 std::string const& firstName,
71 std::string const& secondName)
73 if (first.GetShape() != second.GetShape())
75 throw InvalidArgumentException(descName + ": "
76 + firstName + " & " + secondName + " must have identical shapes");
80 //---------------------------------------------------------------
81 void ValidateNumInputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
83 if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
85 throw InvalidArgumentException(descName +
86 ": Requires exactly " + to_string(expectedSize) + "input(s). " +
87 to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
91 //---------------------------------------------------------------
92 void ValidateNumOutputs(const WorkloadInfo& workloadInfo, std::string const& descName, const unsigned int expectedSize)
94 if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
96 throw InvalidArgumentException(descName +
97 ": Requires exactly " + to_string(expectedSize) + " output(s). " +
98 to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
102 //---------------------------------------------------------------
103 void ValidateTensorNumDimensions(const TensorInfo& tensor,
104 std::string const& descName,
105 unsigned int numDimensions,
106 std::string const& tensorName)
108 if (tensor.GetNumDimensions() != numDimensions)
110 throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
111 to_string(tensor.GetNumDimensions()) + " dimensions for " +
112 tensorName + " tensor.");
116 //---------------------------------------------------------------
117 void ValidateTensorNumElements(const TensorInfo& tensor,
118 std::string const& descName,
119 unsigned int numElements,
120 std::string const& tensorName)
122 if (tensor.GetNumElements() != numElements)
124 throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
125 to_string(tensor.GetNumElements()) + " elements for " +
126 tensorName + " tensor.");
130 //---------------------------------------------------------------
131 void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
132 unsigned int numDimension,
133 unsigned int numElements,
134 std::string const& tensorName)
136 const std::string functionName{"ValidateTensorNumDimNumElem"};
137 ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
138 ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
141 //---------------------------------------------------------------
142 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
143 const std::string& descName, std::string const& tensorName)
145 if (tensor.GetDataType() != dataType)
147 throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
148 GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
152 //---------------------------------------------------------------
153 void ValidateTensorQuantizationSpace(const TensorInfo& first,
154 const TensorInfo& second,
155 const std::string& descName,
156 std::string const& firstName,
157 std::string const& secondName)
159 if (!first.IsQuantized() ||
160 !second.IsQuantized())
162 // Not a quantized type, ignore the validation
166 DataType firstDataType = first.GetDataType();
167 DataType secondDataType = second.GetDataType();
169 if (firstDataType != secondDataType)
171 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
172 " must be of the same quantized type, " +
173 firstName + " is " + GetDataTypeName(firstDataType) + ", " +
174 secondName + " is " + GetDataTypeName(secondDataType));
177 if (!first.IsTypeSpaceMatch(second))
179 throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
180 " must have the same quantization space, " +
181 firstName + " has offset " + to_string(first.GetQuantizationOffset()) +
182 " and scale " + to_string(first.GetQuantizationScale()) + ", " +
183 secondName + " has offset " + to_string(second.GetQuantizationOffset()) +
184 " and scale " + to_string(second.GetQuantizationScale()));
188 //---------------------------------------------------------------
189 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
190 const TensorInfo& inputTensorInfo,
191 const TensorInfo& weightsTensorInfo,
192 const std::string& descName)
194 if (biasTensor.GetQuantizationOffset() != 0)
196 throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
197 to_string(biasTensor.GetQuantizationOffset()));
199 const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
200 if (std::abs(biasTensor.GetQuantizationScale() - expectedScale) > 0.00000001f)
202 // Print the float values with extra precision to see very small differences
203 std::stringstream msg;
204 msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
205 " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
206 biasTensor.GetQuantizationScale();
207 throw InvalidArgumentException(msg.str());
211 //---------------------------------------------------------------
212 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
213 unsigned int numExpected,
214 const std::string& descName,
215 const std::string& varName)
217 if (vec.empty() && numExpected > 0)
219 throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
222 for (unsigned int i = 0; i < numExpected; ++i)
226 throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
231 //---------------------------------------------------------------
232 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
233 const TensorInfo& second,
234 const TensorInfo& output,
235 std::string const& descName,
236 std::string const& firstName,
237 std::string const& secondName)
239 // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
241 if (first.GetNumDimensions() != second.GetNumDimensions())
243 throw InvalidArgumentException(descName + ": Tensors "
244 + firstName + " & " + secondName
245 + " must have the same number of dimensions in order to be broadcasted");
247 uint32_t numDims = first.GetNumDimensions();
248 std::vector<uint32_t> outputDims(numDims, 0u);
249 for (uint32_t i = 0; i < numDims; i++)
251 const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
252 const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
253 if (dimsNotEqual && dimsNotOne)
255 throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
257 outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
259 TensorShape broadcastShape = TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
260 if (broadcastShape != output.GetShape())
262 throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
263 + firstName + " & " + secondName
264 + " does not match the output shape");
268 //---------------------------------------------------------------
269 void ValidateDataTypes(const TensorInfo& info,
270 const std::vector<armnn::DataType>& supportedTypes,
271 std::string const& descName)
273 auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
274 if (iterator == supportedTypes.end())
276 throw InvalidArgumentException(descName + ": " + " Tensor type is not supported.");
280 //---------------------------------------------------------------
281 void ValidateTensorDataTypesMatch(const TensorInfo& first,
282 const TensorInfo& second,
283 std::string const& descName,
284 std::string const& firstName,
285 std::string const& secondName)
287 if (first.GetDataType() != second.GetDataType())
289 throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
290 " must have identical data types.");
294 //---------------------------------------------------------------
295 void ValidateTensorNumElementsMatch(const TensorInfo& first,
296 const TensorInfo& second,
297 std::string const& descName,
298 std::string const& firstName,
299 std::string const& secondName)
301 if (first.GetNumElements() != second.GetNumElements())
303 throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
304 " must have the same number of elements.");
308 } // anonymous namespace
310 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
311 unsigned int numExpectedIn, unsigned int numExpectedOut) const
313 ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
314 ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
317 //---------------------------------------------------------------
318 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
320 const std::string descriptorName{"MemCopyQueueDescriptor"};
322 ValidateNumInputs(workloadInfo, descriptorName, 1);
323 ValidateNumOutputs(workloadInfo, descriptorName , 1);
325 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
326 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
328 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
329 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
331 if (m_Inputs.size() != m_Outputs.size())
333 throw InvalidArgumentException(boost::str(
334 boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
335 descriptorName % m_Inputs.size() % m_Outputs.size()));
338 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
342 throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL input %2%.") %
343 descriptorName % i));
348 throw InvalidArgumentException(boost::str(boost::format("%1%: Invalid NULL output %2%") %
349 descriptorName % i));
354 //---------------------------------------------------------------
355 void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
357 ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
358 ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor" , 1);
360 if (workloadInfo.m_InputTensorInfos.size() != 1)
362 throw InvalidArgumentException(boost::str(
363 boost::format("Number of input infos (%1%) is not 1.")
364 % workloadInfo.m_InputTensorInfos.size()));
368 if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
370 throw InvalidArgumentException(boost::str(
371 boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
372 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
375 for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
377 if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
378 workloadInfo.m_OutputTensorInfos[i].GetNumElements())
380 throw InvalidArgumentException(boost::str(
381 boost::format("Number of elements for tensor input and output %1% does not match")
386 if (m_Inputs.size() != 1)
388 throw InvalidArgumentException(boost::str(
389 boost::format("Number of inputs (%1%) is not 1.")
393 if (m_Inputs.size() != m_Outputs.size())
395 throw InvalidArgumentException(boost::str(
396 boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
397 % m_Inputs.size() % m_Outputs.size()));
400 for (unsigned int i = 0; i < m_Inputs.size(); ++i)
404 throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
409 throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
414 //---------------------------------------------------------------
415 void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
417 ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
418 ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor" , 1);
420 if (m_Inputs.size() != 1)
422 throw InvalidArgumentException(boost::str(
423 boost::format("Number of inputs (%1%) is not 1.")
427 if (m_Outputs.size() != 0)
429 throw InvalidArgumentException(boost::str(
430 boost::format("Number of outputs (%1%) is not 0.")
431 % m_Inputs.size() % m_Outputs.size()));
436 throw InvalidArgumentException(boost::str(boost::format("Invalid null input 0")));
440 //---------------------------------------------------------------
441 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
443 const std::string descriptorName{"ActivationQueueDescriptor"};
445 ValidateNumInputs(workloadInfo, descriptorName, 1);
446 ValidateNumOutputs(workloadInfo, descriptorName, 1);
448 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
449 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
451 std::vector<DataType> supportedTypes =
455 DataType::QuantisedAsymm8,
456 DataType::QuantisedSymm16
459 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
460 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
461 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
464 void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
466 const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
468 ValidateNumInputs(workloadInfo, descriptorName, 1);
469 ValidateNumOutputs(workloadInfo, descriptorName, 1);
471 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
472 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
474 std::vector<DataType> supportedTypes =
478 DataType::QuantisedAsymm8,
479 DataType::QuantisedSymm16
482 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
483 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
484 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
487 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
489 const std::string descriptorName{"SoftmaxQueueDescriptor"};
491 ValidateNumInputs(workloadInfo, descriptorName, 1);
492 ValidateNumOutputs(workloadInfo, descriptorName, 1);
494 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
495 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
497 std::vector<DataType> supportedTypes =
501 DataType::QuantisedAsymm8,
502 DataType::QuantisedSymm16
505 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
506 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
507 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
510 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
512 const std::string descriptorName{"SplitterQueueDescriptor"};
514 ValidateNumInputs(workloadInfo, descriptorName, 1);
516 // Check the supported data types
517 std::vector<DataType> supportedTypes =
523 DataType::QuantisedAsymm8,
524 DataType::QuantisedSymm16
527 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
528 for (unsigned long i = 0ul; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
530 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
531 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
533 const std::string outputName = "output_" + std::to_string(i);
534 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
537 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
539 throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
542 if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
544 throw InvalidArgumentException(
545 descriptorName + ": Number of split windows "
546 "has to match number of workloadInfo.m_OutputTensorInfos. "
547 "Number of windows: " +
548 to_string(m_ViewOrigins.size()) +
549 ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
552 //The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
553 std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
554 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
556 //Checks that the dimensionality of input is same as the split windows.
557 ViewOrigin const& e = m_ViewOrigins[w];
558 if (e.m_Origin.size() != inputDims)
560 throw InvalidArgumentException(descriptorName + ": Window origin have to "
561 "have the same dimensionality as the input tensor. "
562 "Window origin (index: " +
563 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
564 " dimensions, the input "
566 to_string(inputDims) + " dimensions.");
568 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
570 if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
571 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
573 throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
574 "be smaller or equal than the size of the input in that coord.");
580 void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
582 const std::string descriptorName{"ConcatQueueDescriptor"};
584 ValidateNumOutputs(workloadInfo, descriptorName, 1);
586 if (m_Inputs.size() <= 0)
588 throw InvalidArgumentException(descriptorName + ": At least one input needs to be provided.");
590 if (m_Outputs.size() <= 0)
592 throw InvalidArgumentException(descriptorName + ": At least one output needs to be provided.");
595 if (workloadInfo.m_InputTensorInfos.size() <= 0)
597 throw InvalidArgumentException(descriptorName + ": At least one TensorInfo input needs to be provided.");
599 if (workloadInfo.m_OutputTensorInfos.size() <= 0)
601 throw InvalidArgumentException(descriptorName + ": At least one TensorInfo output needs to be provided.");
604 if(m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
606 throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
609 if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
614 if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
616 throw InvalidArgumentException(
617 descriptorName + ": Number of split windows "
618 "has to match number of workloadInfo.m_InputTensorInfos. "
619 "Number of windows: " +
620 to_string(m_ViewOrigins.size()) +
621 ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
624 //The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
625 std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
626 for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
628 //Checks that the dimensionality of output is same as the split windows.
629 ViewOrigin const& e = m_ViewOrigins[w];
630 if (e.m_Origin.size() != outputDims)
632 throw InvalidArgumentException(descriptorName + ": Window origin have to "
633 "have the same dimensionality as the output tensor. "
634 "Window origin (index: " +
635 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
636 " dimensions, the output "
638 to_string(outputDims) + " dimensions.");
640 //Checks that the merge windows are within the output tensor.
641 for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
643 if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
644 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
646 throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
647 "be smaller or equal than the size of the output in that coord.");
652 // Check the supported data types
653 std::vector<DataType> supportedTypes =
659 DataType::QuantisedAsymm8,
660 DataType::QuantisedSymm16
663 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
664 for (unsigned long i = 0ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
666 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
667 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
669 const std::string inputName = "input_" + std::to_string(i);
670 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
674 void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
676 const std::string descriptorName{"StackQueueDescriptor"};
678 ValidateNumOutputs(workloadInfo, descriptorName, 1);
680 if (m_Parameters.m_NumInputs != workloadInfo.m_InputTensorInfos.size())
682 throw InvalidArgumentException(descriptorName + ": Must have the defined number of input tensors.");
685 // All inputs must have the same shape, which is defined in parameters
686 const TensorShape& inputShape = m_Parameters.m_InputShape;
687 for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
689 if (workloadInfo.m_InputTensorInfos[i].GetShape() != inputShape)
691 throw InvalidArgumentException(descriptorName + ": All input tensor shapes must match the defined shape.");
695 if (inputShape.GetNumDimensions() > 4)
697 throw InvalidArgumentException(descriptorName + ": Input tensor may have up to 4 dimensions.");
700 // m_Axis is 0-based and may take values from 0 to the number of input dimensions (inclusive),
701 // since the output tensor has an additional dimension.
702 if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
704 throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
705 "than the number of input dimensions.");
708 // Output shape must be as inferred from the input shape
709 const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();
710 for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
712 if (outputShape[i] != inputShape[i])
714 throw InvalidArgumentException(descriptorName + ": Output tensor must "
715 "match shape inferred from input tensor.");
719 if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
721 throw InvalidArgumentException(descriptorName + ": Output tensor must "
722 "match shape inferred from input tensor.");
725 for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
727 if (outputShape[i] != inputShape[i-1])
729 throw InvalidArgumentException(descriptorName + ": Output tensor must "
730 "match shape inferred from input tensor.");
734 if (outputShape.GetNumDimensions() > 5)
736 throw InvalidArgumentException(descriptorName + ": Output tensor may have up to 5 dimensions.");
739 // Check the supported data types
740 std::vector<DataType> supportedTypes =
746 DataType::QuantisedAsymm8,
747 DataType::QuantisedSymm16
750 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
752 for (unsigned int i = 1ul; i < workloadInfo.m_InputTensorInfos.size(); ++i)
754 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
755 workloadInfo.m_InputTensorInfos[i],
758 "input_" + std::to_string(i));
761 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
762 workloadInfo.m_OutputTensorInfos[0],
768 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
770 const std::string descriptorName{"FullyConnectedQueueDescriptor"};
772 ValidateNumInputs(workloadInfo, descriptorName, 1);
773 ValidateNumOutputs(workloadInfo, descriptorName, 1);
775 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
776 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
778 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
780 if (!(inputTensorInfo.GetNumDimensions() == 2 || inputTensorInfo.GetNumDimensions() == 4))
782 throw InvalidArgumentException(descriptorName + ": Input tensor must have 2 or 4 dimensions.");
785 ValidatePointer(m_Weight, descriptorName, "weight");
787 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
788 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");
790 if (m_Parameters.m_BiasEnabled)
792 ValidatePointer(m_Bias, descriptorName, "bias");
794 // Validates type and quantization values.
795 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
796 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
798 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
799 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
802 // Check the supported data types
803 std::vector<DataType> supportedTypes =
807 DataType::QuantisedAsymm8,
808 DataType::QuantisedSymm16
811 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
812 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
815 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
817 const std::string descriptorName{"NormalizationQueueDescriptor"};
819 ValidateNumInputs(workloadInfo, descriptorName, 1);
820 ValidateNumOutputs(workloadInfo, descriptorName, 1);
822 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
823 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
825 // Check the supported data types
826 std::vector<DataType> supportedTypes =
830 DataType::QuantisedAsymm8,
831 DataType::QuantisedSymm16
834 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
836 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
838 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
841 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
843 const std::string descriptorName{"AdditionQueueDescriptor"};
845 ValidateNumInputs(workloadInfo, descriptorName, 2);
846 ValidateNumOutputs(workloadInfo, descriptorName, 1);
848 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
849 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
850 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
852 std::vector<DataType> supportedTypes =
855 DataType::QuantisedAsymm8,
856 DataType::QuantisedSymm16,
860 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
861 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
862 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
864 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
865 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
867 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
875 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
877 const std::string descriptorName{"MultiplicationQueueDescriptor"};
879 ValidateNumInputs(workloadInfo, descriptorName, 2);
880 ValidateNumOutputs(workloadInfo, descriptorName, 1);
882 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
883 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
884 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
886 std::vector<DataType> supportedTypes =
889 DataType::QuantisedAsymm8,
890 DataType::QuantisedSymm16,
894 ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
895 ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
896 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
898 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
899 ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");
901 ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
909 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
911 const std::string descriptorName{"BatchNormalizationQueueDescriptor"};
913 ValidateNumInputs(workloadInfo, descriptorName, 1);
914 ValidateNumOutputs(workloadInfo, descriptorName, 1);
916 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
917 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
919 std::vector<DataType> supportedTypes =
923 DataType::QuantisedAsymm8,
924 DataType::QuantisedSymm16
927 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
928 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
930 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
931 ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
932 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
934 ValidatePointer(m_Mean, descriptorName, "mean");
935 ValidatePointer(m_Variance, descriptorName, "variance");
936 ValidatePointer(m_Beta, descriptorName, "beta");
937 ValidatePointer(m_Gamma, descriptorName, "gamma");
939 const TensorInfo& mean = m_Mean->GetTensorInfo();
940 const TensorInfo& variance = m_Variance->GetTensorInfo();
941 const TensorInfo& beta = m_Beta->GetTensorInfo();
942 const TensorInfo& gamma = m_Gamma->GetTensorInfo();
944 ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
945 ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
946 ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
947 ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
949 ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
950 ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
951 ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
954 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
956 const std::string descriptorName{"Convolution2dQueueDescriptor"};
958 ValidateNumInputs(workloadInfo, descriptorName, 1);
959 ValidateNumOutputs(workloadInfo, descriptorName, 1);
961 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
962 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
964 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
965 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
967 ValidatePointer(m_Weight, descriptorName, "weight");
969 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
970 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
972 ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
974 if (m_Parameters.m_BiasEnabled)
976 ValidatePointer(m_Bias, descriptorName, "bias");
978 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
979 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
981 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
982 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
985 std::vector<DataType> supportedTypes =
988 DataType::QuantisedAsymm8,
989 DataType::QuantisedSymm16,
993 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
994 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
997 void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
999 const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};
1001 ValidateNumInputs(workloadInfo, descriptorName, 1);
1002 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1004 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1005 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1007 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1008 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1010 ValidatePointer(m_Weight, descriptorName, "weight");
1012 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
1013 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
1015 if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1 )
1017 throw InvalidArgumentException(
1018 boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
1019 "cannot be smaller than 1.") % descriptorName %
1020 m_Parameters.m_DilationX % m_Parameters.m_DilationX));
1023 const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;
1025 // Expected weight shape: [ M, I, H, W ] - This shape does NOT depend on the data layout
1026 // inputChannels * channelMultiplier should be equal to outputChannels.
1027 const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
1028 const unsigned int numWeightInputChannels = weightTensorInfo.GetShape()[1];
1029 const unsigned int numWeightOutputChannels = outputTensorInfo.GetShape()[channelIndex];
1030 if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
1032 throw InvalidArgumentException(
1033 boost::str(boost::format("%1%: output_channels (provided %2%) should be "
1034 "equal to input_channels (provided %3%) multiplied by channel_multiplier "
1035 "(provided %4%).") % descriptorName % numWeightOutputChannels %
1036 numWeightInputChannels % numWeightChannelMultiplier));
1039 ValidateTensorDataTypesMatch(inputTensorInfo, weightTensorInfo, descriptorName, "input", "weight");
1041 if (m_Parameters.m_BiasEnabled)
1043 ValidatePointer(m_Bias, descriptorName, "bias");
1045 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
1046 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
1048 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
1049 ValidateTensorDataType(biasTensorInfo, GetBiasDataType(inputTensorInfo.GetDataType()), descriptorName, "bias");
1052 std::vector<DataType> supportedTypes =
1055 DataType::QuantisedAsymm8,
1056 DataType::QuantisedSymm16,
1060 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1061 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1064 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1066 const std::string descriptorName{"PermuteQueueDescriptor"};
1068 ValidateNumInputs(workloadInfo, descriptorName, 1);
1069 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1071 const PermutationVector& mapping = m_Parameters.m_DimMappings;
1073 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1074 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1076 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
1077 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");
1079 for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
1081 if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
1083 throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
1084 " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
1085 "must match dst dimension " + to_string(mapping[i]) +
1086 " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
1090 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1093 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1095 const std::string descriptorName{"Pooling2dQueueDescriptor"};
1097 ValidateNumInputs(workloadInfo, descriptorName, 1);
1098 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1100 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1101 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1103 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1104 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1106 std::vector<DataType> supportedTypes =
1110 DataType::QuantisedAsymm8,
1111 DataType::QuantisedSymm16
1114 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1115 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1118 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1120 const std::string descriptorName{"ResizeBilinearQueueDescriptor"};
1122 ValidateNumInputs(workloadInfo, descriptorName, 1);
1123 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1125 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1126 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1128 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1129 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1131 std::vector<DataType> supportedTypes =
1135 DataType::QuantisedAsymm8,
1136 DataType::QuantisedSymm16
1139 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1140 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1142 // ResizeBilinear only changes width and height: batch and channel count must match.
1143 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1144 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1145 if (inputBatchSize != outputBatchSize)
1147 throw InvalidArgumentException(
1148 boost::str(boost::format("%1%: Input batch size (%2%) "
1149 "does not match output batch size (%3%)") %
1150 descriptorName % inputBatchSize % outputBatchSize));
1153 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1154 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1155 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1156 if (inputChannelCount != outputChannelCount)
1158 throw InvalidArgumentException(
1159 boost::str(boost::format("%1%: Input channel count (%2%) "
1160 "does not match output channel count (%3%)") %
1161 descriptorName % inputChannelCount % outputChannelCount));
1165 void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1167 const std::string descriptorName{"ResizeQueueDescriptor"};
1169 ValidateNumInputs(workloadInfo, descriptorName, 1);
1170 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1172 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1173 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1175 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1176 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1178 std::vector<DataType> supportedTypes =
1182 DataType::QuantisedAsymm8,
1183 DataType::QuantisedSymm16
1186 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1187 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1189 // Resize only changes width and height: batch and channel count must match.
1190 const unsigned int inputBatchSize = inputTensorInfo.GetShape()[0];
1191 const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
1192 if (inputBatchSize != outputBatchSize)
1194 throw InvalidArgumentException(
1195 boost::str(boost::format("%1%: Input batch size (%2%) "
1196 "does not match output batch size (%3%)") %
1197 descriptorName % inputBatchSize % outputBatchSize));
1200 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1201 const unsigned int inputChannelCount = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1202 const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
1203 if (inputChannelCount != outputChannelCount)
1205 throw InvalidArgumentException(
1206 boost::str(boost::format("%1%: Input channel count (%2%) "
1207 "does not match output channel count (%3%)") %
1208 descriptorName % inputChannelCount % outputChannelCount));
1212 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1214 const std::string descriptorName{"FakeQuantizationQueueDescriptor"};
1216 ValidateNumInputs(workloadInfo, descriptorName, 1);
1217 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1219 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1220 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1222 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
1223 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");
1225 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1227 if (m_Parameters.m_Min > m_Parameters.m_Max)
1229 throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
1233 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1235 const std::string descriptorName{"L2NormalizationQueueDescriptor"};
1237 ValidateNumInputs(workloadInfo, descriptorName, 1);
1238 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1240 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1241 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1243 if (inputTensorInfo.GetNumDimensions() > 4)
1245 throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1248 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1250 // Check the supported data types
1251 std::vector<DataType> supportedTypes =
1255 DataType::QuantisedAsymm8,
1256 DataType::QuantisedSymm16
1259 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1260 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1262 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1265 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1267 const std::string descriptorName{"ConstantQueueDescriptor"};
1269 ValidateNumInputs(workloadInfo, descriptorName, 0);
1270 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1274 throw InvalidArgumentException(descriptorName + ": No const input specified.");
1277 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1278 ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName, "constant", "output");
1280 // Check the supported data types
1281 std::vector<DataType> supportedTypes =
1286 DataType::QuantisedAsymm8,
1287 DataType::QuantisedSymm16
1290 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1293 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1295 const std::string descriptorName{"ReshapeQueueDescriptor"};
1297 ValidateNumInputs(workloadInfo, descriptorName, 1);
1298 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1300 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1301 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1303 ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1305 // Check the supported data types
1306 std::vector<DataType> supportedTypes =
1310 DataType::QuantisedAsymm8,
1311 DataType::QuantisedSymm16
1314 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1315 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1318 void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1320 const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};
1322 ValidateNumInputs(workloadInfo, descriptorName, 1);
1323 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1325 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1326 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1328 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1329 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1331 if (m_Parameters.m_BlockShape.size() != 2)
1333 throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
1336 if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
1338 throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
1339 "dimensions as Block Shape.");
1342 const TensorShape& inputShape = inputTensorInfo.GetShape();
1344 std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
1345 std::pair<unsigned int, unsigned int> widthPad = m_Parameters.m_PadList[1];
1347 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1349 const unsigned int inputWidth = inputShape[dimensionIndices.GetWidthIndex()] +
1350 widthPad.first + widthPad.second;
1351 const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
1352 heightPad.first + heightPad.second;
1354 const unsigned int numInputElements = inputShape[0] * inputHeight * inputWidth *
1355 inputShape[dimensionIndices.GetChannelsIndex()];
1356 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1358 if (numOutputElements != numInputElements)
1360 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1361 to_string(numInputElements) + " after padding but output tensor has " +
1362 to_string(numOutputElements) + " elements.");
1365 if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
1367 throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
1368 "divisible by Block Shape in all spatial dimensions");
1371 std::vector<DataType> supportedTypes =
1375 DataType::QuantisedAsymm8,
1376 DataType::QuantisedSymm16
1379 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1380 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1383 void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1385 const std::string descriptorName{"SpaceToDepthQueueDescriptor"};
1387 ValidateNumInputs(workloadInfo, descriptorName, 1);
1388 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1390 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1391 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1393 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
1394 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
1396 std::vector<DataType> supportedTypes =
1400 DataType::QuantisedAsymm8,
1401 DataType::QuantisedSymm16
1404 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1405 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
1407 DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
1408 const unsigned int wIndex = dimensionIndices.GetWidthIndex();
1409 const unsigned int hIndex = dimensionIndices.GetHeightIndex();
1410 const unsigned int cIndex = dimensionIndices.GetChannelsIndex();
1412 const TensorShape& inputShape = inputTensorInfo.GetShape();
1414 const unsigned int numInputElements =
1415 inputShape[0] * inputShape[wIndex] * inputShape[hIndex] * inputShape[cIndex];
1416 const unsigned int numOutputElements = outputTensorInfo.GetNumElements();
1418 if (numOutputElements != numInputElements)
1420 throw InvalidArgumentException(descriptorName + ": Input tensor has " +
1421 std::to_string(numInputElements) + " but output tensor has " +
1422 std::to_string(numOutputElements) + " elements.");
1425 if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
1427 throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
1428 "by block size in all spatial dimensions");
1432 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1434 const std::string descriptorName{"FloorQueueDescriptor"};
1436 ValidateNumInputs(workloadInfo, descriptorName, 1);
1437 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1439 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1440 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1442 std::vector<DataType> supportedTypes =
1445 DataType::QuantisedSymm16
1448 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1450 if (inputTensorInfo != outputTensorInfo)
1452 throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
1456 void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1458 // ported from android/ml/nn/common/operations/LSTM.cpp CheckInputTensorDimensions()
1460 const std::string descriptorName{"LstmQueueDescriptor"};
1462 // check dimensions of all inputs and outputs
1463 if (workloadInfo.m_InputTensorInfos.size() != 3)
1465 throw InvalidArgumentException(descriptorName + ": Invalid number of inputs.");
1467 if (workloadInfo.m_OutputTensorInfos.size() != 4)
1469 throw InvalidArgumentException(descriptorName + ": Invalid number of outputs.");
1472 std::vector<DataType> supportedTypes =
1476 DataType::QuantisedSymm16
1479 // check for supported type of one input and match them with all the other input and output
1480 ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);
1482 // type matches all other inputs
1483 for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
1485 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1486 workloadInfo.m_InputTensorInfos[i],
1489 "input_" + std::to_string(i));
1491 // type matches all other outputs
1492 for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
1494 ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
1495 workloadInfo.m_OutputTensorInfos[i],
1496 "LstmQueueDescriptor",
1498 "output_" + std::to_string(i));
1501 // TODO: check clipping parameter is valid
1503 // Inferring batch size, number of outputs and number of cells from the inputs.
1504 // TODO: figure out if there is a way to make sure the specific inputs are at that index of workloadInfo
1505 const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
1506 const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
1507 ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
1508 const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];
1509 ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
1510 const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];
1513 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
1514 descriptorName + " input_0");
1515 // outputStateInTensor
1516 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
1517 descriptorName + " input_1");
1518 // outputStateInTensor
1519 ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
1520 descriptorName + " input_2");
1521 // scratchBufferTensor
1522 unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
1523 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
1524 descriptorName + " output_0");
1525 // outputStateOutTensor
1526 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
1527 descriptorName + " output_1");
1528 // cellStateOutTensor
1529 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
1530 descriptorName + " output_2");
1532 ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
1533 descriptorName + " output_3");
1536 // check that dimensions of inputs/outputs and QueueDescriptor data match with each other
1537 if ( m_InputToInputWeights )
1539 ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
1540 (n_cell * n_input), "InputLayerNormWeights");
1543 ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
1544 ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
1545 (n_cell * n_input), "InputToForgetWeights");
1547 ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
1548 ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
1549 (n_cell * n_input), "InputToCellWeights");
1551 if ( m_RecurrentToInputWeights )
1553 ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
1554 (n_cell * n_output), "RecurrentToInputWeights");
1557 ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
1558 ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
1559 (n_cell * n_output), "RecurrentToForgetWeights");
1561 ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
1562 ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
1563 (n_cell * n_output), "RecurrentToCellWeights");
1565 // Make sure the input-gate's parameters are either both present (regular
1566 // LSTM) or not at all (CIFG-LSTM). And CifgEnable is set accordingly.
1567 bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
1568 !m_Parameters.m_CifgEnabled) ||
1569 (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
1570 m_Parameters.m_CifgEnabled));
1571 if (!cifg_weights_all_or_none)
1573 throw InvalidArgumentException(descriptorName + ": Input-Gate's parameters InputToInputWeights and "
1574 "RecurrentToInputWeights must either both be present (regular LSTM) "
1575 "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
1579 if ( m_CellToInputWeights )
1581 ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
1582 n_cell, "CellToInputWeights");
1584 if ( m_CellToForgetWeights )
1586 ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
1587 n_cell, "CellToForgetWeights");
1589 if ( m_CellToOutputWeights )
1591 ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
1592 n_cell, "CellToOutputWeights");
1595 // Making sure the peephole weights are there all or none. And PeepholeEnable is set accordingly.
1596 bool peephole_weights_all_or_none =
1597 (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
1598 && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
1599 || ( !m_CellToInputWeights && !m_CellToForgetWeights
1600 && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
1601 if (!peephole_weights_all_or_none)
1603 throw InvalidArgumentException(descriptorName + ": Invalid combination of peephole parameters.");
1606 // Make sure the input gate bias is present only when not a CIFG-LSTM.
1607 if (m_Parameters.m_CifgEnabled)
1609 if (m_InputGateBias)
1611 throw InvalidArgumentException(descriptorName + ": InputGateBias is present and CIFG-LSTM is enabled.");
1616 if (!m_InputGateBias)
1618 throw InvalidArgumentException(descriptorName + ": If CIFG-LSTM is disabled InputGateBias "
1619 "must be present.");
1621 ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
1622 n_cell, "InputGateBias");
1625 ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
1626 ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");
1628 ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
1629 ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");
1631 ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
1632 ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");
1634 if (m_ProjectionWeights)
1636 ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
1637 (n_cell * n_output), "ProjectionWeights");
1639 if (m_ProjectionBias)
1641 ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
1644 // Making sure the projection tensors are consistent:
1645 // 1) If projection weight is not present, then projection bias should not be
1647 // 2) If projection weight is present, then projection bias is optional.
1648 bool projecton_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
1649 !m_Parameters.m_ProjectionEnabled)
1650 || (m_ProjectionWeights && !m_ProjectionBias &&
1651 m_Parameters.m_ProjectionEnabled)
1652 || (m_ProjectionWeights && m_ProjectionBias &&
1653 m_Parameters.m_ProjectionEnabled));
1654 if (!projecton_tensors_consistent)
1656 throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
1659 // The four layer normalization weights either all have values or none of them have values. Additionally, if
1660 // CIFG is used, input layer normalization weights tensor is omitted and the other layer normalization weights
1661 // either all have values or none of them have values. Layer normalization is used when the values of all the
1662 // layer normalization weights are present
1663 if (m_InputLayerNormWeights)
1665 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
1667 if (m_ForgetLayerNormWeights)
1669 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1671 if (m_CellLayerNormWeights)
1673 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1675 if (m_OutputLayerNormWeights)
1677 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1680 if (m_Parameters.m_LayerNormEnabled)
1682 if (!m_Parameters.m_CifgEnabled)
1684 if (!m_InputLayerNormWeights)
1686 throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG-LSTM is "
1687 "disabled but InputLayerNormWeights are not present");
1689 ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
1690 1, n_cell, "InputLayerNormWeights");
1692 else if (m_InputLayerNormWeights)
1694 throw InvalidArgumentException(descriptorName + ":InputLayerNormWeights are present while CIFG is "
1698 ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
1699 "ForgetLayerNormWeights");
1700 ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
1702 ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
1703 "OutputLayerNormWeights");
1704 ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
1706 ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
1707 "CellLayerNormWeights");
1708 ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
1710 else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights || m_CellLayerNormWeights)
1712 throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
1713 "normalisation weights are present.");
1717 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1719 const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
1721 ValidateNumInputs(workloadInfo, descriptorName, 1);
1722 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1724 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1725 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1727 if (inputTensorInfo.GetDataType() != DataType::Float32)
1729 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
1732 if (outputTensorInfo.GetDataType() != DataType::Float16)
1734 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
1737 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1740 void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1742 const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};
1744 ValidateNumInputs(workloadInfo, descriptorName, 1);
1745 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1747 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1748 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1750 if (inputTensorInfo.GetDataType() != DataType::Float16)
1752 throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float16.");
1755 if (outputTensorInfo.GetDataType() != DataType::Float32)
1757 throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
1760 ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1763 void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise division: two inputs, one output. Checks supported data types
// and that the input shapes are broadcast-compatible with the output shape.
1765     const std::string descriptorName{"DivisionQueueDescriptor"};
1767     ValidateNumInputs(workloadInfo, descriptorName, 2);
1768     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1770     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1771     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1772     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1774     std::vector<DataType> supportedTypes =
// NOTE(review): only the quantised entries of the supported-type list are
// visible in this view; float entries presumably precede/follow — confirm.
1777         DataType::QuantisedAsymm8,
1778         DataType::QuantisedSymm16,
1782     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1783     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1784     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
// Input shapes may differ as long as they broadcast to the output shape.
1786     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1794 void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise subtraction: two inputs, one output. Checks supported data
// types and broadcast-compatibility of the input shapes with the output.
1796     const std::string descriptorName{"SubtractionQueueDescriptor"};
1798     ValidateNumInputs(workloadInfo, descriptorName, 2);
1799     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1801     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1802     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1803     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1805     std::vector<DataType> supportedTypes =
// NOTE(review): only the quantised entries of the supported-type list are
// visible in this view — confirm the full list against the original file.
1808         DataType::QuantisedAsymm8,
1809         DataType::QuantisedSymm16,
1813     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1814     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1815     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
// Input shapes may differ as long as they broadcast to the output shape.
1817     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1825 void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise maximum: two inputs, one output. Checks supported data types
// and broadcast-compatibility of the input shapes with the output.
1827     const std::string descriptorName{"MaximumQueueDescriptor"};
1829     ValidateNumInputs(workloadInfo, descriptorName, 2);
1830     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1832     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
1833     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
1834     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1836     std::vector<DataType> supportedTypes =
// NOTE(review): list is partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
1841         DataType::QuantisedAsymm8,
1842         DataType::QuantisedSymm16
1845     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
1846     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
1847     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
// Input shapes may differ as long as they broadcast to the output shape.
1849     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
1857 void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Mean reduction over the axes given in m_Parameters.m_Axis. Validates the
// output rank against the three reduction modes below.
1859     const std::string descriptorName{"MeanQueueDescriptor"};
1861     ValidateNumInputs(workloadInfo, descriptorName, 1);
1862     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1864     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1865     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1867     std::vector<DataType> supportedTypes =
1871         DataType::QuantisedAsymm8,
1872         DataType::QuantisedSymm16
1875     // First check if input tensor data type is supported, then
1876     // check if this data type matches the output tensor data type
1877     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1878     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
// KeepDims: reduced axes stay as size-1 dimensions, so rank is preserved.
1880     if (m_Parameters.m_KeepDims)
1882         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
// No axes given: reduce over everything, producing a rank-1 result.
1884     else if (m_Parameters.m_Axis.empty())
1886         ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
// Otherwise each reduced axis is dropped; a full reduction is clamped to
// rank 1 because a scalar is represented as a 1-element, 1-D tensor.
1890         unsigned int outputDim =
1891             inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
1892         ValidateTensorNumDimensions(outputTensorInfo,
1894                                     outputDim > 0 ? outputDim : 1,
1899 void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
1901 const std::string descriptorName{"PadQueueDescriptor"};
1903 ValidateNumInputs(workloadInfo, descriptorName, 1);
1904 ValidateNumOutputs(workloadInfo, descriptorName, 1);
1906 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1907 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1909 // input and output should have the same number of dimensions
1910 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
1912 // there should be entry in the pad list for each dimension in the input tensor
1913 if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions()) {
1914 throw InvalidArgumentException(descriptorName + ":Pad List should contain the same number of entries "
1915 "as there are dimensions in the input tensor that is " +
1916 std::to_string(inputTensorInfo.GetNumDimensions()) + " entries " +
1917 " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
1921 void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Quantize converts a float tensor to a quantised representation: the input
// must be of a supported (float) type and the output must be a quantised type.
1923     const std::string descriptorName{"QuantizeQueueDescriptor"};
1925     ValidateNumInputs(workloadInfo, descriptorName, 1);
1926     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1928     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1929     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
// NOTE(review): the supported-type entries are hidden in this view —
// presumably the float types; confirm against the original file.
1931     std::vector<DataType> supportedTypes =
1937     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
// Only the two quantised types are legal outputs of a Quantize layer.
1939     if (outputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
1940         outputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
1942         throw InvalidArgumentException(descriptorName + ": Output of quantized layer must be quantized type.");
1946 void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// BatchToSpaceNd rearranges data, so only data-type support and input/output
// type agreement are validated here.
1948     const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};
1950     ValidateNumInputs(workloadInfo, descriptorName, 1);
1951     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1953     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1954     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1956     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
1960         DataType::QuantisedAsymm8,
1961         DataType::QuantisedSymm16
1964     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1965     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
1968 void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// StridedSlice: validates data types, quantisation consistency, rank limits,
// and that Begin/End/Stride each have one entry per input dimension.
1970     const std::string descriptorName{"StridedSliceQueueDescriptor"};
1972     ValidateNumInputs(workloadInfo, descriptorName, 1);
1973     ValidateNumOutputs(workloadInfo, descriptorName, 1);
1975     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
1976     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
1978     std::vector<DataType> supportedTypes =
1982         DataType::QuantisedAsymm8,
1983         DataType::QuantisedSymm16
1986     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
1987     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
// Slicing does not rescale values, so quantisation parameters must match too.
1989     ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
// NOTE(review): the guard condition before this throw is hidden in this
// view; per the message it rejects rank > 4 — confirm.
1991     const uint32_t rank = inputTensorInfo.GetNumDimensions();
1994         throw InvalidArgumentException(descriptorName + ": Input tensors with rank greater than 4 are not supported.");
1997     // Begin, End & Stride length must be of rank(input0)
1998     if (m_Parameters.m_Begin.size() != rank)
2000         throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + std::to_string(rank));
2003     if (m_Parameters.m_End.size() != rank)
2005         throw InvalidArgumentException(descriptorName + ": End length must be of rank " + std::to_string(rank));
2008     if (m_Parameters.m_Stride.size() != rank)
2010         throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + std::to_string(rank));
// A zero stride would make the slice loop never advance.
2013     // Stride entries must be non-zero
2014     for (auto& stride : m_Parameters.m_Stride)
2018             throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero.");
2023 void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise minimum: two inputs, one output. Checks supported data types
// and broadcast-compatibility of the input shapes with the output.
2025     const std::string descriptorName{"MinimumQueueDescriptor"};
2027     ValidateNumInputs(workloadInfo, descriptorName, 2);
2028     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2030     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2031     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2032     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2034     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
2039         DataType::QuantisedAsymm8,
2040         DataType::QuantisedSymm16
2043     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2044     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2045     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
// Input shapes may differ as long as they broadcast to the output shape.
2047     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
2055 void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2057 const std::string descriptorName{"DebugQueueDescriptor"};
2059 ValidateNumInputs(workloadInfo, descriptorName, 1);
2060 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2063 void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise equality comparison: inputs must broadcast to the output
// shape and the output must be Boolean.
2065     const std::string descriptorName{"EqualQueueDescriptor"};
2067     ValidateNumInputs(workloadInfo, descriptorName, 2);
2068     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2070     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2071     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2072     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2074     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
// Comparison results are always Boolean regardless of the input types.
2081     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2083         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2087 void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Element-wise greater-than comparison: inputs must broadcast to the output
// shape and the output must be Boolean.
2089     const std::string descriptorName{"GreaterQueueDescriptor"};
2091     ValidateNumInputs(workloadInfo, descriptorName, 2);
2092     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2094     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2095     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2096     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2098     ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
// Comparison results are always Boolean regardless of the input types.
2105     if (outputTensorInfo.GetDataType() != DataType::Boolean)
2107         throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
2111 void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Reciprocal square root is applied element-wise, so input and output must
// have identical shapes and matching data types.
2113     const std::string descriptorName{"RsqrtQueueDescriptor"};
2115     ValidateNumInputs(workloadInfo, descriptorName, 1);
2116     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2118     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2119     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2121     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2123     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
2127         DataType::QuantisedAsymm8,
2128         DataType::QuantisedSymm16
2131     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2132     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2135 void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Gather: input[0] is the data tensor, input[1] the integer indices. The
// output rank is data-rank + indices-rank - 1 (each index picks a slice).
2137     const std::string descriptorName{"GatherQueueDescriptor"};
2139     ValidateNumInputs(workloadInfo, descriptorName, 2);
2140     ValidateNumOutputs(workloadInfo, descriptorName, 1);
// Indices are checked first so a wrong indices type gives a specific error.
2142     const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
2143     if (indicesTensorInfo.GetDataType() != DataType::Signed32)
2145         throw InvalidArgumentException(descriptorName + ": Indices tensor type must be Int32.");
2148     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2149     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2151     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
2155         DataType::QuantisedAsymm8,
2156         DataType::QuantisedSymm16
2159     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2161     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2163     unsigned int outputDim  = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
2164     ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
2167 void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Validates the SSD-style detection post-processing workload: 2 inputs
// (box encodings, class scores), an anchors constant, and exactly 4 outputs
// (boxes, classes, scores, number of detections).
// NOTE(review): descriptorName is a const reference bound to a temporary
// std::string — legal (lifetime extension) but inconsistent with the
// by-value declarations used elsewhere in this file.
2169     const std::string& descriptorName{"DetectionPostProcessQueueDescriptor"};
2171     ValidateNumInputs(workloadInfo, descriptorName, 2);
2173     if (workloadInfo.m_OutputTensorInfos.size() != 4)
2175         throw InvalidArgumentException(descriptorName + ": Requires exactly four outputs. " +
2176                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
// Anchors come from the descriptor (a constant tensor), not from the inputs.
2179     if (m_Anchors == nullptr)
2181         throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
2184     const TensorInfo& boxEncodingsInfo =  workloadInfo.m_InputTensorInfos[0];
2185     const TensorInfo& scoresInfo       =  workloadInfo.m_InputTensorInfos[1];
2186     const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();
2188     const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
2189     const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
2190     const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
2191     const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];
2193     ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
2194     ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
2195     ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");
2197     const std::vector<DataType> supportedInputTypes =
// NOTE(review): list partially hidden in this view — confirm full contents.
2200         DataType::QuantisedAsymm8,
2201         DataType::QuantisedSymm16
2204     ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
2205     ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
2206     ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);
2208     ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
2209     ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
2210     ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
2211     ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");
2213     // NOTE: Output is always Float32 regardless of input type
2214     ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
2215     ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
2216     ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
2217     ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");
// IoU threshold must lie in (0, 1] for non-maximum suppression to be defined.
2219     if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
2221         throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
2222                                        "must be positive and less than or equal to 1.");
// Scores carry an extra leading "background" class in dimension 2.
2225     if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
2227         throw InvalidArgumentException(descriptorName + ": Number of classes with background "
2228                                        "should be equal to number of classes + 1.");
2232 void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Dequantize converts a quantised tensor back to floating point: input must
// be a quantised type, output a supported float type.
// NOTE(review): descriptorName is a const reference bound to a temporary
// std::string — legal but inconsistent with the by-value form used by most
// other Validate() functions in this file.
2234     const std::string& descriptorName{"DequantizeQueueDescriptor"};
2236     ValidateNumInputs(workloadInfo, descriptorName, 1);
2237     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2239     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2240     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2242     if (inputTensorInfo.GetDataType() != DataType::QuantisedAsymm8 &&
2243         inputTensorInfo.GetDataType() != DataType::QuantisedSymm16)
2245         throw InvalidArgumentException(descriptorName + ": Input to dequantize layer must be quantized type.");
// NOTE(review): the supported output types are hidden in this view —
// presumably the float types; confirm against the original file.
2248     std::vector<DataType> supportedTypes =
2254     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2257 void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2259 const std::string& descriptorName{"MergeQueueDescriptor"};
2261 ValidateNumInputs(workloadInfo, descriptorName, 2);
2262 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2264 const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2265 const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2266 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2268 ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2269 ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2271 ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
2272 ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
2275 void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Switch routes input 0 to one of two outputs depending on input 1, so both
// outputs must match the data input's shape.
// NOTE(review): descriptorName is a const reference bound to a temporary —
// legal but inconsistent with the by-value form used elsewhere in this file.
2277     const std::string& descriptorName{"SwitchQueueDescriptor"};
2279     ValidateNumInputs(workloadInfo, descriptorName, 2);
2280     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2282     const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
2283     const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
2285     const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
2286     const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];
2288     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view — confirm full contents.
2291         DataType::QuantisedAsymm8,
2292         DataType::QuantisedSymm16
2295     ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
2296     ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
2298     ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
2299     ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);
// Both branch outputs carry the (unmodified) data tensor, hence the two
// shape checks against input 0 (continuation arguments hidden in this view).
2301     ValidateTensorShapesMatch(inputTensorInfo0,
2307     ValidateTensorShapesMatch(inputTensorInfo0,
2314 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2316 // This is internally generated so it should not need validation.
2319 void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2321 const std::string& descriptorName{"PreluQueueDescriptor"};
2323 ValidateNumInputs(workloadInfo, descriptorName, 2);
2324 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2326 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2327 const TensorInfo& alphaTensorInfo = workloadInfo.m_InputTensorInfos[1];
2328 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2330 std::vector<DataType> supportedTypes
2334 DataType::QuantisedAsymm8,
2335 DataType::QuantisedSymm16
2338 ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2339 ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);
2341 ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
2343 ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
2344 ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "ouptut");
2346 ValidateBroadcastTensorShapesMatch(inputTensorInfo,
2354 void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
2356 const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};
2358 ValidateNumInputs(workloadInfo, descriptorName, 1);
2359 ValidateNumOutputs(workloadInfo, descriptorName, 1);
2361 const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2362 const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2364 ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
2365 ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");
2367 ValidatePointer(m_Weight, descriptorName, "weight");
2369 const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
2370 ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");
2371 ValidateTensorDataType(weightTensorInfo, inputTensorInfo.GetDataType(), descriptorName, "weight");
2373 if (m_Parameters.m_BiasEnabled)
2375 ValidatePointer(m_Bias, descriptorName, "bias");
2377 const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
2378 ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
2380 ValidateTensorDataType(biasTensorInfo,
2381 GetBiasDataType(inputTensorInfo.GetDataType()),
2385 ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
2389 void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Validates the quantised LSTM workload: 3 inputs (input, cellStateIn,
// outputStateIn), 2 outputs (cellStateOut, outputStateOut), plus 8 weight
// and 4 bias constant tensors whose shapes are derived from the inferred
// numBatches / inputSize / outputSize.
2391     const std::string descriptorName{"QuantizedLstmQueueDescriptor"};
2393     // Validate number of inputs/outputs
2394     ValidateNumInputs(workloadInfo, descriptorName, 3);
2395     ValidateNumOutputs(workloadInfo, descriptorName, 2);
2397     // Input/output tensor infos
// NOTE(review): these `auto` declarations copy each TensorInfo by value;
// `const TensorInfo&` (as used elsewhere in this file) would avoid the
// copies without changing behaviour.
2398     auto inputInfo = workloadInfo.m_InputTensorInfos[0];
2399     auto cellStateInInfo = workloadInfo.m_InputTensorInfos[1];
2400     auto outputStateInInfo = workloadInfo.m_InputTensorInfos[2];
2402     auto cellStateOutInfo = workloadInfo.m_OutputTensorInfos[0];
2403     auto outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];
2405     std::vector<DataType> inputOutputSupportedTypes =
2407         DataType::QuantisedAsymm8
2410     std::vector<DataType> cellStateSupportedTypes =
2412         DataType::QuantisedSymm16
2415     std::vector<DataType> weightsSupportedTypes =
2417         DataType::QuantisedAsymm8
// NOTE(review): the bias supported-type entries are hidden in this view —
// presumably Signed32 for quantised inputs; confirm.
2420     std::vector<DataType> biasSupportedTypes =
2425     // Validate types of input/output tensors
2426     ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
2427     ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
2428     ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);
2430     ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
2431     ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);
2433     // Validate matching types of input/output tensors
2434     ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2435     ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
2436                                  "outputStateIn", "outputStateOut");
2437     ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2439     // Validate matching quantization info for input/output tensors
2440     ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
2441     ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
2442     ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");
2444     // Infer number of batches, input size and output size from tensor dimensions
2445     const uint32_t numBatches = inputInfo.GetShape()[0];
2446     const uint32_t inputSize  = inputInfo.GetShape()[1];
2447     const uint32_t outputSize = cellStateInInfo.GetShape()[1];
2449     // Validate number of dimensions and number of elements for input/output tensors
2450     ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
2451     ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
2452     ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
2453     ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
2454     ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
2456     // Validate number of dimensions and number of elements for weights tensors
2457     ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
2458     auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
2459     ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");
2461     ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
2462     auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
2463     ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");
2465     ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
2466     auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
2467     ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");
2469     ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
2470     auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
2471     ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");
2473     ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
2474     auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
2475     ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToInputWeights");
2477     ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
2478     auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
2479     ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
2480                                 " RecurrentToForgetWeights");
2482     ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
2483     auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
2484     ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2486     ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
2487     auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
// NOTE(review): the tensor name below says "RecurrentToCellWeights" but this
// call validates the *output* weights — almost certainly a copy-paste error
// (" RecurrentToOutputWeights" intended); confirm and fix.
2488     ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize), " RecurrentToCellWeights");
2490     // Validate data types for weights tensors (all should match each other)
2491     ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);
2493     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
2494                                  "inputToInputWeights", "inputToForgetWeights");
2495     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
2496                                  "inputToInputWeights", "inputToCellWeights");
2497     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
2498                                  "inputToInputWeights", "inputToOutputWeights");
2500     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
2501                                  "inputToInputWeights", "recurrentToInputWeights");
// NOTE(review): "recurrentToForgeteights" below is a typo for
// "recurrentToForgetWeights" in the reported tensor name.
2502     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
2503                                  "inputToInputWeights", "recurrentToForgeteights");
2504     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
2505                                  "inputToInputWeights", "recurrentToCellWeights");
2506     ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
2507                                  "inputToInputWeights", "recurrentToOutputWeights");
2509     // Validate matching quantization info for weight tensors (all should match each other)
2510     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo,
2511                                     descriptorName, "inputToInputWeights", "inputToForgetWeights");
2512     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo,
2513                                     descriptorName, "inputToInputWeights", "inputToCellWeights");
2514     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo,
2515                                     descriptorName, "inputToInputWeights", "inputToOutputWeights");
2517     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo,
2518                                     descriptorName, "inputToInputWeights", "recurrentToInputWeights");
2519     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo,
2520                                     descriptorName, "inputToInputWeights", "recurrentToForgetWeights");
2521     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo,
2522                                     descriptorName, "inputToInputWeights", "recurrentToCellWeights");
2523     ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo,
2524                                     descriptorName, "inputToInputWeights", "recurrentToOutputWeights");
2526     // Validate number of dimensions and number of elements in bias tensors
2527     ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
2528     auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
2529     ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");
2531     ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
2532     auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
2533     ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");
2535     ValidatePointer(m_CellBias, descriptorName, "CellBias");
2536     auto cellBiasInfo = m_CellBias->GetTensorInfo();
2537     ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");
2539     ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
2540     auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
2541     ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
2543     // Validate data types for bias tensors (all should match each other)
2544     ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);
2546     ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
2547                                  "inputGateBias", "forgetGateBias");
2548     ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
2549                                  "inputGateBias", "cellBias");
2550     ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
2551                                  "inputGateBias", "outputGateBias");
2553     // Validate bias tensor quantization info
2554     ValidateBiasTensorQuantization(inputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2555     ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2556     ValidateBiasTensorQuantization(cellBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2557     ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
2560 void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
// Absolute value is applied element-wise, so input and output must have
// identical shapes and matching data types.
2562     const std::string descriptorName{"AbsQueueDescriptor"};
2564     ValidateNumInputs(workloadInfo, descriptorName, 1);
2565     ValidateNumOutputs(workloadInfo, descriptorName, 1);
2567     const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
2568     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
2570     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2572     std::vector<DataType> supportedTypes =
// NOTE(review): list partially hidden in this view; float entries are
// presumably present alongside the quantised ones — confirm.
2576         DataType::QuantisedAsymm8,
2577         DataType::QuantisedSymm16
2580     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
2581     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
2584 } // namespace armnn