#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>

DataType GetBiasDataType(DataType inputDataType)
{
    switch (inputDataType)
    {
        case DataType::Float16:
            return DataType::Float16;
        case DataType::Float32:
            return DataType::Float32;
        case DataType::QAsymmS8:
            return DataType::Signed32;
        case DataType::QAsymmU8:
            return DataType::Signed32;
        case DataType::QSymmS8:
            return DataType::Signed32;
        case DataType::QSymmS16:
            return DataType::Signed32;
        default:
            BOOST_ASSERT_MSG(false, "Invalid input data type");
            return DataType::Float32;
    }
}
template <typename T>
std::string to_string(T value)
{
    std::ostringstream os;
    os << value;
    return os.str();
}
void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
{
    if (ptr == nullptr)
    {
        throw InvalidArgumentException(descName + ": " + paramName + " parameter must be set.");
    }
}
void ValidateTensorShapesMatch(const TensorInfo& first,
                               const TensorInfo& second,
                               std::string const& descName,
                               std::string const& firstName,
                               std::string const& secondName)
{
    if (first.GetShape() != second.GetShape())
    {
        throw InvalidArgumentException(descName + ": "
                                       + firstName + " & " + secondName + " must have identical shapes");
    }
}
void ValidateNumInputs(const WorkloadInfo& workloadInfo,
                       std::string const& descName,
                       const unsigned int expectedSize)
{
    if (workloadInfo.m_InputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " input(s). " +
                                       to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
    }
}
void ValidateNumOutputs(const WorkloadInfo& workloadInfo,
                        std::string const& descName,
                        const unsigned int expectedSize)
{
    if (workloadInfo.m_OutputTensorInfos.size() != expectedSize)
    {
        throw InvalidArgumentException(descName +
                                       ": Requires exactly " + to_string(expectedSize) + " output(s). " +
                                       to_string(workloadInfo.m_OutputTensorInfos.size()) + " have been provided.");
    }
}
void ValidateTensorNumDimensions(const TensorInfo& tensor,
                                 std::string const& descName,
                                 unsigned int numDimensions,
                                 std::string const& tensorName)
{
    if (tensor.GetNumDimensions() != numDimensions)
    {
        throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
                                       to_string(tensor.GetNumDimensions()) + " dimensions for " +
                                       tensorName + " tensor.");
    }
}
void ValidateTensorNumElements(const TensorInfo& tensor,
                               std::string const& descName,
                               unsigned int numElements,
                               std::string const& tensorName)
{
    if (tensor.GetNumElements() != numElements)
    {
        throw InvalidArgumentException(descName + ": Expected " + to_string(numElements) + " but got " +
                                       to_string(tensor.GetNumElements()) + " elements for " +
                                       tensorName + " tensor.");
    }
}
void ValidateTensorNumDimNumElem(const TensorInfo& tensorInfo,
                                 unsigned int numDimension,
                                 unsigned int numElements,
                                 std::string const& tensorName)
{
    const std::string functionName{"ValidateTensorNumDimNumElem"};

    ValidateTensorNumDimensions(tensorInfo, functionName, numDimension, tensorName);
    ValidateTensorNumElements(tensorInfo, functionName, numElements, tensorName);
}
void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
                            const std::string& descName, std::string const& tensorName)
{
    if (tensor.GetDataType() != dataType)
    {
        throw InvalidArgumentException(descName + ": Expected " + GetDataTypeName(dataType) + " but got " +
                                       GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
void ValidPerAxisQuantizedDataType(const TensorInfo& tensor,
                                   const std::string& descName,
                                   const std::string& tensorName)
{
    if (tensor.GetDataType() != DataType::QuantizedSymm8PerAxis)
    {
        throw InvalidArgumentException(descName +
            ": Expected data type which supports per-axis quantization scheme but got " +
            GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
    }
}
void ValidateTensorQuantizationSpace(const TensorInfo& first,
                                     const TensorInfo& second,
                                     const std::string& descName,
                                     std::string const& firstName,
                                     std::string const& secondName)
{
    DataType firstDataType  = first.GetDataType();
    DataType secondDataType = second.GetDataType();

    if (firstDataType != secondDataType)
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must be of the same quantized type, " +
                                       firstName + " is " + GetDataTypeName(firstDataType) + ", " +
                                       secondName + " is " + GetDataTypeName(secondDataType));
    }

    if (!first.IsTypeSpaceMatch(second))
    {
        throw InvalidArgumentException(descName + ": " + firstName + " and " + secondName +
                                       " must have the same quantization space, " +
                                       "but their quantization scales or offsets differ.");
    }
}
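// Bias quantization: for quantized layers the bias scale is expected to be the product of the input
// scale and the weight scale; when per-axis quantization is used this is checked per output channel
// (biasScales[i] against inputScale * weightScales[i]) by the helper below.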
void ValidateBiasTensorQuantization(const TensorInfo& biasTensor,
                                    const TensorInfo& inputTensorInfo,
                                    const TensorInfo& weightsTensorInfo,
                                    const std::string& descName)
{
    // Helper that checks an individual bias scale against the expected input * weight scale.
    auto VerifyBiasQuantizationScale = [&descName](float biasScale, float expectedScale) -> void
    {
        constexpr float tolerance = 0.000001f;
        if (std::abs(biasScale - expectedScale) > tolerance)
        {
            // Print the float values with just enough precision to see the difference.
            std::stringstream msg;
            msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
                " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
                biasScale;
            throw InvalidArgumentException(msg.str());
        }
    };

    // Per-axis case: one scale per output channel for both weights and biases.
    const std::vector<float>& weightScales = weightsTensorInfo.GetQuantizationScales();
    const std::vector<float>& biasScales   = biasTensor.GetQuantizationScales();

    if (weightScales.size() != biasScales.size())
    {
        std::stringstream msg;
        msg << descName << ": Expected matching number of per-axis quantization scales, but got different "
            << "values: weights=" << weightScales.size() << ", biases=" << biasScales.size();
        throw InvalidArgumentException(msg.str());
    }

    for (size_t i = 0ul; i < biasScales.size(); ++i)
    {
        const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightScales[i];
        VerifyBiasQuantizationScale(biasScales[i], expectedScale);
    }
}
void ValidateTensors(const std::vector<ITensorHandle*>& vec,
                     unsigned int numExpected,
                     const std::string& descName,
                     const std::string& varName)
{
    if (vec.empty() && numExpected > 0)
    {
        throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
    }

    for (unsigned int i = 0; i < numExpected; ++i)
    {
        if (!vec[i])
        {
            throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
        }
    }
}
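// Broadcast rule used below: both inputs must have the same rank, and in every dimension the sizes
// must either match or one of them must be 1; the element-wise maximum of the two shapes must then
// equal the output shape.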
void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
                                        const TensorInfo& second,
                                        const TensorInfo& output,
                                        std::string const& descName,
                                        std::string const& firstName,
                                        std::string const& secondName)
{
    if (first.GetNumDimensions() != second.GetNumDimensions())
    {
        throw InvalidArgumentException(descName + ": Tensors "
                                       + firstName + " & " + secondName
                                       + " must have the same number of dimensions in order to be broadcasted");
    }

    uint32_t numDims = first.GetNumDimensions();
    std::vector<uint32_t> outputDims(numDims, 0u);
    for (uint32_t i = 0; i < numDims; i++)
    {
        const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
        const bool dimsNotOne   = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
        if (dimsNotEqual && dimsNotOne)
        {
            throw InvalidArgumentException(descName + ": Broadcasting is not possible for incompatible shapes");
        }
        outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
    }

    TensorShape broadcastShape =
        TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
    if (broadcastShape != output.GetShape())
    {
        throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
                                       + firstName + " & " + secondName
                                       + " does not match the output shape");
    }
}
void ValidateDataTypes(const TensorInfo& info,
                       const std::vector<armnn::DataType>& supportedTypes,
                       std::string const& descName)
{
    auto iterator = std::find(supportedTypes.begin(), supportedTypes.end(), info.GetDataType());
    if (iterator == supportedTypes.end())
    {
        throw InvalidArgumentException(descName + ": Tensor type is not supported.");
    }
}
void ValidateTensorDataTypesMatch(const TensorInfo& first,
                                  const TensorInfo& second,
                                  std::string const& descName,
                                  std::string const& firstName,
                                  std::string const& secondName)
{
    if (first.GetDataType() != second.GetDataType())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have identical data types.");
    }
}
void ValidateTensorNumElementsMatch(const TensorInfo& first,
                                    const TensorInfo& second,
                                    std::string const& descName,
                                    std::string const& firstName,
                                    std::string const& secondName)
{
    if (first.GetNumElements() != second.GetNumElements())
    {
        throw InvalidArgumentException(descName + ": " + firstName + " & " + secondName +
                                       " must have the same number of elements.");
    }
}
void ValidateWeightDataType(const TensorInfo& inputInfo,
                            const TensorInfo& weightInfo,
                            const std::string& descName)
{
    const DataType inputType = inputInfo.GetDataType();
    if (IsQuantized8BitType(inputType))
    {
        const std::vector<DataType> validTypes =
        {
            // ...
            DataType::QuantizedSymm8PerAxis
        };

        ValidateDataTypes(weightInfo, validTypes, descName);
    }
    else
    {
        ValidateTensorDataTypesMatch(inputInfo, weightInfo, descName, "input", "weight");
    }
}
void ValidatePerAxisQuantizationDimension(const TensorInfo& tensorInfo,
                                          const std::string& descName,
                                          const std::string& tensorName)
{
    const Optional<unsigned int>& quantizationDim = tensorInfo.GetQuantizationDim();
    if (!quantizationDim.has_value())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Quantization dimension for per-axis quantization not set on tensor %2%.")
            % descName % tensorName));
    }

    if (quantizationDim.value() != 0)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Quantization dimension for per-axis quantization expected to be 0 on tensor %2%, "
                          "but got: %3%") % descName % tensorName % quantizationDim.value()));
    }
}
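// Per-axis quantization is only accepted with a quantization dimension of 0 and a zero quantization
// offset; the helper below and ValidatePerAxisQuantization enforce this for weights and, when
// present, biases.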
void ValidatePerAxisQuantizationOffset(const TensorInfo& tensorInfo,
                                       const std::string& descName,
                                       const std::string& tensorName)
{
    const int32_t quantizationOffset = tensorInfo.GetQuantizationOffset();
    if (quantizationOffset != 0)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Quantization offset for per-axis quantization expected to be 0 on tensor %2%, "
                          "but got: %3%") % descName % tensorName % quantizationOffset));
    }
}
void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
                                 const TensorInfo& outputInfo,
                                 const TensorInfo& weightInfo,
                                 const Optional<TensorInfo>& optionalBiasInfo,
                                 const std::string& descName)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        const DataType inputDataType  = inputInfo.GetDataType();
        const DataType outputDataType = outputInfo.GetDataType();

        const bool canHavePerAxisQuantization =
            (IsQuantized8BitType(inputDataType)) && inputDataType == outputDataType;

        if (!canHavePerAxisQuantization)
        {
            throw InvalidArgumentException(boost::str(
                boost::format("%1%: Per-axis quantization parameters set on tensor %2%, "
                              "but data type does not support per-axis quantization.") % descName % "weight"));
        }

        ValidPerAxisQuantizedDataType(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationDimension(weightInfo, descName, "weight");
        ValidatePerAxisQuantizationOffset(weightInfo, descName, "weight");

        if (optionalBiasInfo.has_value())
        {
            const TensorInfo& biasInfo = optionalBiasInfo.value();
            if (!biasInfo.HasPerAxisQuantization())
            {
                throw InvalidArgumentException(boost::str(
                    boost::format("%1%: Per-axis quantization parameters not set on bias tensor, despite being set on "
                                  "weight tensor.") % descName));
            }

            ValidateTensorDataType(biasInfo, DataType::Signed32, descName, "bias");
            ValidatePerAxisQuantizationDimension(biasInfo, descName, "bias");
            ValidatePerAxisQuantizationOffset(biasInfo, descName, "bias");
        }
    }
}
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
                                            unsigned int numExpectedIn,
                                            unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MemCopyQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("%1%: Number of inputs (%2%) does not match the number of outputs (%3%).") %
            descriptorName % m_Inputs.size() % m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(
                boost::format("%1%: Invalid NULL input %2%.") % descriptorName % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(
                boost::format("%1%: Invalid NULL output %2%.") % descriptorName % i));
        }
    }
}
void MemImportQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemImportQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemImportQueueDescriptor", 1);

    if (workloadInfo.m_InputTensorInfos.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) is not 1.") % workloadInfo.m_InputTensorInfos.size()));
    }

    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
            % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match") % i));
        }
    }

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) is not 1.") % m_Inputs.size()));
    }

    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
            % m_Inputs.size() % m_Outputs.size()));
    }

    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid NULL input %1%.") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid NULL output %1%.") % i));
        }
    }
}
void MemSyncQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateNumInputs(workloadInfo, "MemSyncQueueDescriptor", 1);
    ValidateNumOutputs(workloadInfo, "MemSyncQueueDescriptor", 1);

    if (m_Inputs.size() != 1)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) is not 1.") % m_Inputs.size()));
    }

    if (m_Outputs.size() != 0)
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of outputs (%1%) is not 0.") % m_Outputs.size()));
    }
}
void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ActivationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedInputTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);

    auto inputShape  = inputTensorInfo.GetShape();
    auto outputShape = outputTensorInfo.GetShape();

    auto inputNumDimensions = inputShape.GetNumDimensions();
    auto unsignedAxis       = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);

    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};

    // 1D input shape results in scalar output shape
    if (inputShape.GetNumDimensions() == 1)
    {
        if (outputShape.GetNumDimensions() != 1 && outputShape[0] != 1)
        {
            throw InvalidArgumentException(descriptorName + outputShapeError);
        }
    }
    else
    {
        for (unsigned int i = 0; i < unsignedAxis; ++i)
        {
            if (outputShape[i] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }

        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
        {
            if (outputShape[i - 1] != inputShape[i])
            {
                throw InvalidArgumentException(descriptorName + outputShapeError);
            }
        }
    }
}
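// Note: the ArgMinMax checks above require the output shape to be the input shape with the reduced
// axis removed (a 1-D input reduces to a single element).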
void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SplitterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    for (unsigned int i = 0; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[i];
        ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

        const std::string outputName = "output_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", outputName);
    }

    if (m_ViewOrigins.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    const unsigned int inputDims = inputTensorInfo.GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        const ViewOrigin& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin information must "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }

        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                inputTensorInfo.GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
void ConcatQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConcatQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    if (m_Parameters.GetConcatAxis() > workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Invalid concatenation axis provided.");
    }

    if (workloadInfo.m_InputTensorInfos[0].GetShape().GetNumDimensions() - m_Parameters.GetConcatAxis() == 1)
    {
        return;
    }

    if (m_ViewOrigins.size() != workloadInfo.m_InputTensorInfos.size())
    {
        throw InvalidArgumentException(
            descriptorName + ": Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    const unsigned int outputDims = outputTensorInfo.GetNumDimensions();
    for (unsigned int w = 0; w < m_ViewOrigins.size(); ++w)
    {
        const ViewOrigin& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException(descriptorName + ": Window origin information must "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }

        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i] >
                outputTensorInfo.GetShape()[i])
            {
                throw InvalidArgumentException(descriptorName + ": Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }

    std::vector<DataType> supportedTypes = { /* ... */ };

    for (unsigned int i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[i];
        ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

        const std::string inputName = "input_" + std::to_string(i);
        ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, inputName, "output");
    }
}
void StackQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StackQueueDescriptor"};

    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorShape& inputShape  = m_Parameters.m_InputShape;
    const TensorShape& outputShape = workloadInfo.m_OutputTensorInfos[0].GetShape();

    if (m_Parameters.m_Axis > inputShape.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Axis may not be greater "
                                       "than the number of input dimensions.");
    }

    for (unsigned int i = 0; i < m_Parameters.m_Axis; ++i)
    {
        if (outputShape[i] != inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor shape does not "
                                           "match shape inferred from input tensor.");
        }
    }

    if (outputShape[m_Parameters.m_Axis] != m_Parameters.m_NumInputs)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor shape does not "
                                       "match shape inferred from input tensor.");
    }

    for (unsigned int i = m_Parameters.m_Axis + 1; i < inputShape.GetNumDimensions() + 1; ++i)
    {
        if (outputShape[i] != inputShape[i-1])
        {
            throw InvalidArgumentException(descriptorName + ": Output tensor shape does not "
                                           "match shape inferred from input tensor.");
        }
    }

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    for (unsigned int i = 1; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName, "input_0",
                                     "input_" + std::to_string(i));
    }
}
void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FullyConnectedQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 2, "weight");

    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        const TensorInfo& biasTensorInfo = m_Bias->GetTensorInfo();
        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);

        ValidateTensorNumDimensions(biasTensorInfo, descriptorName, 1, "bias");
    }

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AdditionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MultiplicationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo1, outputTensorInfo, descriptorName, "input_1", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"BatchNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidatePointer(m_Mean, descriptorName, "mean");
    ValidatePointer(m_Variance, descriptorName, "variance");
    ValidatePointer(m_Beta, descriptorName, "beta");
    ValidatePointer(m_Gamma, descriptorName, "gamma");

    const TensorInfo& mean     = m_Mean->GetTensorInfo();
    const TensorInfo& variance = m_Variance->GetTensorInfo();
    const TensorInfo& beta     = m_Beta->GetTensorInfo();
    const TensorInfo& gamma    = m_Gamma->GetTensorInfo();

    ValidateTensorNumDimensions(mean, descriptorName, 1, "mean");
    ValidateTensorNumDimensions(variance, descriptorName, 1, "variance");
    ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
    ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");

    ValidateTensorShapesMatch(mean, variance, descriptorName, "mean", "variance");
    ValidateTensorShapesMatch(mean, beta, descriptorName, "mean", "beta");
    ValidateTensorShapesMatch(mean, gamma, descriptorName, "mean", "gamma");
}
void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"Convolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DepthwiseConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    if (m_Parameters.m_DilationX < 1 || m_Parameters.m_DilationY < 1)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: dilationX (provided %2%) and dilationY (provided %3%) "
                                     "cannot be smaller than 1.") % descriptorName %
                                     m_Parameters.m_DilationX % m_Parameters.m_DilationY));
    }

    const unsigned int channelIndex = (m_Parameters.m_DataLayout == DataLayout::NCHW) ? 1 : 3;

    const unsigned int numWeightChannelMultiplier = weightTensorInfo.GetShape()[0];
    const unsigned int numWeightInputChannels     = weightTensorInfo.GetShape()[1];
    const unsigned int numWeightOutputChannels    = outputTensorInfo.GetShape()[channelIndex];

    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: output_channels (provided %2%) should be "
                                     "equal to input_channels (provided %3%) multiplied by channel_multiplier "
                                     "(provided %4%).") % descriptorName % numWeightOutputChannels %
                                     numWeightInputChannels % numWeightChannelMultiplier));
    }
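    // The checks above treat the depthwise weight tensor's first two dimensions as
    // [ channelMultiplier, inputChannels ] (independent of the activation data layout), so the
    // output channel count must equal inputChannels * channelMultiplier.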
    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"PermuteQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const PermutationVector& mapping = m_Parameters.m_DimMappings;

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");

    for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
    {
        if (inputTensorInfo.GetShape()[i] != outputTensorInfo.GetShape()[mapping[i]])
        {
            throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(i) +
                                           " (=" + to_string(inputTensorInfo.GetShape()[i]) + ") " +
                                           "must match dst dimension " + to_string(mapping[i]) +
                                           " (=" + to_string(outputTensorInfo.GetShape()[mapping[i]]) + ")");
        }
    }

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"Pooling2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ResizeBilinearQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: Input batch size (%2%) "
                                     "does not match output batch size (%3%)") %
                                     descriptorName % inputBatchSize % outputBatchSize));
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: Input channel count (%2%) "
                                     "does not match output channel count (%3%)") %
                                     descriptorName % inputChannelCount % outputChannelCount));
    }
}
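// Note: the Resize validations only pin down the batch and channel dimensions; the spatial
// dimensions of input and output are allowed to differ.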
void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ResizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const unsigned int inputBatchSize  = inputTensorInfo.GetShape()[0];
    const unsigned int outputBatchSize = outputTensorInfo.GetShape()[0];
    if (inputBatchSize != outputBatchSize)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: Input batch size (%2%) "
                                     "does not match output batch size (%3%)") %
                                     descriptorName % inputBatchSize % outputBatchSize));
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int inputChannelCount  = inputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    const unsigned int outputChannelCount = outputTensorInfo.GetShape()[dimensionIndices.GetChannelsIndex()];
    if (inputChannelCount != outputChannelCount)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("%1%: Input channel count (%2%) "
                                     "does not match output channel count (%3%)") %
                                     descriptorName % inputChannelCount % outputChannelCount));
    }
}
void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FakeQuantizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 2, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 2, "output");

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_Min > m_Parameters.m_Max)
    {
        throw InvalidArgumentException(descriptorName + ": min cannot be greater than max");
    }
}
void InstanceNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"InstanceNormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"L2NormalizationQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void LogSoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"LogSoftmaxQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConstantQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 0);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(), outputTensorInfo, descriptorName,
                              "constant", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ReshapeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void SpaceToBatchNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SpaceToBatchNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    if (m_Parameters.m_BlockShape.size() != 2)
    {
        throw InvalidArgumentException(descriptorName + ": Block Shape must contain 2 spatial dimensions.");
    }

    if (m_Parameters.m_BlockShape.size() != m_Parameters.m_PadList.size())
    {
        throw InvalidArgumentException(descriptorName + ": Pad List must contain the same number of "
                                       "dimensions as Block Shape.");
    }

    const TensorShape& inputShape = inputTensorInfo.GetShape();

    std::pair<unsigned int, unsigned int> heightPad = m_Parameters.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad  = m_Parameters.m_PadList[1];

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);

    const unsigned int inputWidth  = inputShape[dimensionIndices.GetWidthIndex()] +
                                     widthPad.first + widthPad.second;
    const unsigned int inputHeight = inputShape[dimensionIndices.GetHeightIndex()] +
                                     heightPad.first + heightPad.second;

    const unsigned int numInputElements  = inputShape[0] * inputHeight * inputWidth *
                                           inputShape[dimensionIndices.GetChannelsIndex()];
    const unsigned int numOutputElements = outputTensorInfo.GetNumElements();

    if (numOutputElements != numInputElements)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor has " +
            to_string(numInputElements) + " after padding but output tensor has " +
            to_string(numOutputElements) + " elements.");
    }

    if (inputHeight % m_Parameters.m_BlockShape[0] != 0 || inputWidth % m_Parameters.m_BlockShape[1] != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape after padding must be "
                                       "divisible by Block Shape in all spatial dimensions");
    }

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void SpaceToDepthQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SpaceToDepthQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorNumElementsMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        throw InvalidArgumentException(descriptorName + ": Block size cannot be 0.");
    }

    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();
    const unsigned int hIndex = dimensionIndices.GetHeightIndex();
    const unsigned int cIndex = dimensionIndices.GetChannelsIndex();

    const TensorShape& inputShape  = inputTensorInfo.GetShape();
    const TensorShape& outputShape = outputTensorInfo.GetShape();

    if (inputShape[hIndex] % m_Parameters.m_BlockSize != 0 || inputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        throw InvalidArgumentException(descriptorName + ": Input shape must be divisible "
                                       "by block size in all spatial dimensions");
    }

    if (outputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        throw InvalidArgumentException(descriptorName + ": The depth of the output tensor "
                                       "must be divisible by the square of block size.");
    }
}
void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"FloorQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    if (inputTensorInfo != outputTensorInfo)
    {
        throw InvalidArgumentException(descriptorName + ": Input and output tensor infos do not match.");
    }
}
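// LstmQueueDescriptor::Validate below infers n_batch and n_input from input_0 and n_cell/n_output
// from the mandatory InputToOutputWeights and RecurrentToOutputWeights tensors, then requires the
// optional CIFG, peephole, projection and layer-normalisation tensors to be present or absent as
// consistent groups.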
void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"LstmQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 4);

    std::vector<DataType> supportedTypes = { /* ... */ };

    // Check the first input against the supported types, then make sure all other inputs and outputs match it.
    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, descriptorName);

    for (uint32_t i = 1u; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_InputTensorInfos[i],
                                     descriptorName, "input_0",
                                     "input_" + std::to_string(i));
    }

    for (uint32_t i = 0u; i < workloadInfo.m_OutputTensorInfos.size(); ++i)
    {
        ValidateTensorDataTypesMatch(workloadInfo.m_InputTensorInfos[0],
                                     workloadInfo.m_OutputTensorInfos[i],
                                     "LstmQueueDescriptor", "input_0",
                                     "output_" + std::to_string(i));
    }

    if (m_Parameters.m_ClippingThresCell < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative cell clipping threshold is invalid");
    }

    if (m_Parameters.m_ClippingThresProj < 0.0f)
    {
        throw InvalidArgumentException(descriptorName + ": negative projection clipping threshold is invalid");
    }
    // Inferring batch size, number of outputs and number of cells from the inputs.
    const uint32_t n_batch = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
    const uint32_t n_input = workloadInfo.m_InputTensorInfos[0].GetShape()[1];

    ValidatePointer(m_InputToOutputWeights, "Null pointer check", "InputToOutputWeights");
    const uint32_t n_cell = m_InputToOutputWeights->GetShape()[0];

    ValidatePointer(m_RecurrentToOutputWeights, "Null pointer check", "RecurrentToOutputWeights");
    const uint32_t n_output = m_RecurrentToOutputWeights->GetShape()[1];

    // Input tensors
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[0], 2, (n_batch * n_input),
                                descriptorName + " input_0");
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[1], 2, (n_batch * n_output),
                                descriptorName + " input_1");
    ValidateTensorNumDimNumElem(workloadInfo.m_InputTensorInfos[2], 2, (n_batch * n_cell),
                                descriptorName + " input_2");

    // Output tensors
    unsigned int scratchBufferSize = m_Parameters.m_CifgEnabled ? n_cell * 3 : n_cell * 4;
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[0], 2, (n_batch * scratchBufferSize),
                                descriptorName + " output_0");
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[1], 2, (n_batch * n_output),
                                descriptorName + " output_1");
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[2], 2, (n_batch * n_cell),
                                descriptorName + " output_2");
    ValidateTensorNumDimNumElem(workloadInfo.m_OutputTensorInfos[3], 2, (n_batch * n_output),
                                descriptorName + " output_3");
    // Check that the dimensions of the weight tensors match the inferred sizes.
    if (m_InputToInputWeights)
    {
        ValidateTensorNumDimNumElem(m_InputToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_input), "InputToInputWeights");
    }

    ValidatePointer(m_InputToForgetWeights, "Null pointer check", "InputToForgetWeights");
    ValidateTensorNumDimNumElem(m_InputToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, "Null pointer check", "InputToCellWeights");
    ValidateTensorNumDimNumElem(m_InputToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_input), "InputToCellWeights");

    if (m_RecurrentToInputWeights)
    {
        ValidateTensorNumDimNumElem(m_RecurrentToInputWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "RecurrentToInputWeights");
    }

    ValidatePointer(m_RecurrentToForgetWeights, "Null pointer check", "RecurrentToForgetWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToForgetWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, "Null pointer check", "RecurrentToCellWeights");
    ValidateTensorNumDimNumElem(m_RecurrentToCellWeights->GetTensorInfo(), 2,
                                (n_cell * n_output), "RecurrentToCellWeights");
    // Input-gate weights must be present or absent as a consistent pair (CIFG).
    bool cifg_weights_all_or_none = ((m_InputToInputWeights && m_RecurrentToInputWeights &&
                                      !m_Parameters.m_CifgEnabled) ||
                                     (!m_InputToInputWeights && !m_RecurrentToInputWeights &&
                                      m_Parameters.m_CifgEnabled));
    if (!cifg_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": InputToInputWeights and "
                                       "RecurrentToInputWeights must either both be present (regular LSTM) "
                                       "or both not present (CIFG-LSTM). In addition CifgEnable must be set "
                                       "accordingly.");
    }

    if (m_CellToInputWeights)
    {
        ValidateTensorNumDimNumElem(m_CellToInputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToInputWeights");
    }
    if (m_CellToForgetWeights)
    {
        ValidateTensorNumDimNumElem(m_CellToForgetWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToForgetWeights");
    }
    if (m_CellToOutputWeights)
    {
        ValidateTensorNumDimNumElem(m_CellToOutputWeights->GetTensorInfo(), 1,
                                    n_cell, "CellToOutputWeights");
    }

    // Peephole weights must all be present or all be absent, matching PeepholeEnabled.
    bool peephole_weights_all_or_none =
        (((m_CellToInputWeights || m_Parameters.m_CifgEnabled) && m_CellToForgetWeights
          && m_CellToOutputWeights && m_Parameters.m_PeepholeEnabled)
         || (!m_CellToInputWeights && !m_CellToForgetWeights
             && !m_CellToOutputWeights && !m_Parameters.m_PeepholeEnabled));
    if (!peephole_weights_all_or_none)
    {
        throw InvalidArgumentException(descriptorName + ": Inconsistent peephole weights.");
    }
    if (m_Parameters.m_CifgEnabled)
    {
        if (m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": InputGateBias must not be present when CIFG "
                                           "is enabled.");
        }
    }
    else
    {
        if (!m_InputGateBias)
        {
            throw InvalidArgumentException(descriptorName + ": If CIFG is disabled InputGateBias "
                                           "must be present.");
        }
        ValidateTensorNumDimNumElem(m_InputGateBias->GetTensorInfo(), 1,
                                    n_cell, "InputGateBias");
    }

    ValidatePointer(m_ForgetGateBias, "Null pointer check", "ForgetGateBias");
    ValidateTensorNumDimNumElem(m_ForgetGateBias->GetTensorInfo(), 1, n_cell, "ForgetGateBias");

    ValidatePointer(m_CellBias, "Null pointer check", "CellBias");
    ValidateTensorNumDimNumElem(m_CellBias->GetTensorInfo(), 1, n_cell, "CellBias");

    ValidatePointer(m_OutputGateBias, "Null pointer check", "OutputGateBias");
    ValidateTensorNumDimNumElem(m_OutputGateBias->GetTensorInfo(), 1, n_cell, "OutputGateBias");

    if (m_ProjectionWeights)
    {
        ValidateTensorNumDimNumElem(m_ProjectionWeights->GetTensorInfo(), 2,
                                    (n_cell * n_output), "ProjectionWeights");
    }
    if (m_ProjectionBias)
    {
        ValidateTensorNumDimNumElem(m_ProjectionBias->GetTensorInfo(), 1, n_output, "ProjectionBias");
    }

    // Projection weights and bias must be consistent with ProjectionEnabled.
    bool projection_tensors_consistent = ((!m_ProjectionWeights && !m_ProjectionBias &&
                                           !m_Parameters.m_ProjectionEnabled)
                                          || (m_ProjectionWeights && !m_ProjectionBias &&
                                              m_Parameters.m_ProjectionEnabled)
                                          || (m_ProjectionWeights && m_ProjectionBias &&
                                              m_Parameters.m_ProjectionEnabled));
    if (!projection_tensors_consistent)
    {
        throw InvalidArgumentException(descriptorName + ": Projection tensors are inconsistent.");
    }
    if (m_InputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(), 1, n_cell, "InputLayerNormWeights");
    }
    if (m_ForgetLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");
    }
    if (m_CellLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    if (m_OutputLayerNormWeights)
    {
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");
    }

    if (m_Parameters.m_LayerNormEnabled)
    {
        if (!m_Parameters.m_CifgEnabled)
        {
            if (!m_InputLayerNormWeights)
            {
                throw InvalidArgumentException(descriptorName + ": Layer normalisation is enabled and CIFG is "
                                               "disabled but InputLayerNormWeights are not present");
            }
            ValidateTensorNumDimNumElem(m_InputLayerNormWeights->GetTensorInfo(),
                                        1, n_cell, "InputLayerNormWeights");
        }
        else if (m_InputLayerNormWeights)
        {
            throw InvalidArgumentException(descriptorName + ": InputLayerNormWeights are present while CIFG is "
                                           "enabled.");
        }

        ValidatePointer(m_ForgetLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "ForgetLayerNormWeights");
        ValidateTensorNumDimNumElem(m_ForgetLayerNormWeights->GetTensorInfo(), 1, n_cell, "ForgetLayerNormWeights");

        ValidatePointer(m_OutputLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "OutputLayerNormWeights");
        ValidateTensorNumDimNumElem(m_OutputLayerNormWeights->GetTensorInfo(), 1, n_cell, "OutputLayerNormWeights");

        ValidatePointer(m_CellLayerNormWeights, "Null pointer check layer normalisation enabled",
                        "CellLayerNormWeights");
        ValidateTensorNumDimNumElem(m_CellLayerNormWeights->GetTensorInfo(), 1, n_cell, "CellLayerNormWeights");
    }
    else if (m_InputLayerNormWeights || m_ForgetLayerNormWeights || m_OutputLayerNormWeights ||
             m_CellLayerNormWeights)
    {
        throw InvalidArgumentException(descriptorName + ": Layer normalisation is disabled but one or more layer "
                                       "normalisation weights are present.");
    }
}
void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void ConvertFp16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConvertFp16ToFp32QueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void DivisionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DivisionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void SubtractionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SubtractionQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MaximumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MeanQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    if (m_Parameters.m_KeepDims)
    {
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");
    }
    else if (m_Parameters.m_Axis.empty())
    {
        ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
    }
    else
    {
        unsigned int outputDim =
            inputTensorInfo.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
        ValidateTensorNumDimensions(outputTensorInfo,
                                    descriptorName,
                                    outputDim > 0 ? outputDim : 1,
                                    "output");
    }
}
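// Note: with KeepDims the Mean output keeps the input rank; otherwise the rank shrinks by the
// number of reduced axes, but never below 1.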
void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"PadQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, inputTensorInfo.GetNumDimensions(), "output");

    if (m_Parameters.m_PadList.size() != inputTensorInfo.GetNumDimensions())
    {
        throw InvalidArgumentException(descriptorName + ": Pad List must contain as many entries "
            "as there are dimensions in the input tensor that is " +
            std::to_string(inputTensorInfo.GetNumDimensions()) + " entries, " +
            " not " + std::to_string(m_Parameters.m_PadList.size()) + " entries.");
    }
}
void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QuantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
}
void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"BatchToSpaceNdQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void StridedSliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"StridedSliceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidateTensorQuantizationSpace(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    const uint32_t rank = inputTensorInfo.GetNumDimensions();

    // Begin, End and Stride must each have one entry per input dimension.
    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Begin length must be of rank " + to_string(rank));
    }

    if (m_Parameters.m_End.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": End length must be of rank " + to_string(rank));
    }

    if (m_Parameters.m_Stride.size() != rank)
    {
        throw InvalidArgumentException(descriptorName + ": Stride length must be of rank " + to_string(rank));
    }

    for (auto& stride : m_Parameters.m_Stride)
    {
        if (stride == 0)
        {
            throw InvalidArgumentException(descriptorName + ": Stride entries must be non-zero");
        }
    }
}
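// Note: StridedSlice requires one begin/end/stride entry per input dimension, and every stride
// must be non-zero, as enforced above.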
void MinimumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MinimumQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void DebugQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DebugQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);
}
void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"EqualQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GreaterQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
                                       inputTensorInfo1,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input_0",
                                       "input_1");
}
void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"RsqrtQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void GatherQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"GatherQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo   = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& indicesTensorInfo = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo  = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    unsigned int outputDim = inputTensorInfo.GetNumDimensions() + indicesTensorInfo.GetNumDimensions() - 1;
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, outputDim, "output");
}
void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DetectionPostProcessQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 4);

    if (m_Anchors == nullptr)
    {
        throw InvalidArgumentException(descriptorName + ": Anchors tensor descriptor is missing.");
    }

    const TensorInfo& boxEncodingsInfo = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& scoresInfo       = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& anchorsInfo      = m_Anchors->GetTensorInfo();

    const TensorInfo& detectionBoxesInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& detectionClassesInfo = workloadInfo.m_OutputTensorInfos[1];
    const TensorInfo& detectionScoresInfo  = workloadInfo.m_OutputTensorInfos[2];
    const TensorInfo& numDetectionsInfo    = workloadInfo.m_OutputTensorInfos[3];

    ValidateTensorNumDimensions(boxEncodingsInfo, descriptorName, 3, "box encodings");
    ValidateTensorNumDimensions(scoresInfo, descriptorName, 3, "scores");
    ValidateTensorNumDimensions(anchorsInfo, descriptorName, 2, "anchors");

    const std::vector<DataType> supportedInputTypes = { /* ... */ };

    ValidateDataTypes(boxEncodingsInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(scoresInfo, supportedInputTypes, descriptorName);
    ValidateDataTypes(anchorsInfo, supportedInputTypes, descriptorName);

    ValidateTensorNumDimensions(detectionBoxesInfo, descriptorName, 3, "detection boxes");
    ValidateTensorNumDimensions(detectionScoresInfo, descriptorName, 2, "detection scores");
    ValidateTensorNumDimensions(detectionClassesInfo, descriptorName, 2, "detection classes");
    ValidateTensorNumDimensions(numDetectionsInfo, descriptorName, 1, "num detections");

    // All output tensors must be Float32.
    ValidateTensorDataType(detectionBoxesInfo, DataType::Float32, descriptorName, "detection boxes");
    ValidateTensorDataType(detectionScoresInfo, DataType::Float32, descriptorName, "detection scores");
    ValidateTensorDataType(detectionClassesInfo, DataType::Float32, descriptorName, "detection classes");
    ValidateTensorDataType(numDetectionsInfo, DataType::Float32, descriptorName, "num detections");

    if (m_Parameters.m_NmsIouThreshold <= 0.0f || m_Parameters.m_NmsIouThreshold > 1.0f)
    {
        throw InvalidArgumentException(descriptorName + ": Intersection over union threshold "
                                       "must be positive and less than or equal to 1.");
    }

    if (scoresInfo.GetShape()[2] != m_Parameters.m_NumClasses + 1)
    {
        throw InvalidArgumentException(descriptorName + ": Number of classes with background "
                                       "should be equal to number of classes + 1.");
    }
}
void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DequantizeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
}
void MergeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"MergeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorShapesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorShapesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");

    ValidateTensorDataTypesMatch(inputTensorInfo0, inputTensorInfo1, descriptorName, "input_0", "input_1");
    ValidateTensorDataTypesMatch(inputTensorInfo0, outputTensorInfo, descriptorName, "input_0", "output");
}
void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SwitchQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];

    const TensorInfo& outputTensorInfo0 = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputTensorInfo1 = workloadInfo.m_OutputTensorInfos[1];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(inputTensorInfo1, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo0, supportedTypes, descriptorName);
    ValidateDataTypes(outputTensorInfo1, supportedTypes, descriptorName);

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo0,
                              descriptorName,
                              "input_0",
                              "output_0");

    ValidateTensorShapesMatch(inputTensorInfo0,
                              outputTensorInfo1,
                              descriptorName,
                              "input_0",
                              "output_1");
}
void PreluQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"PreluQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& alphaTensorInfo  = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateDataTypes(alphaTensorInfo, supportedTypes, descriptorName);

    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputTensorInfo, alphaTensorInfo, descriptorName, "input", "alpha");
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    ValidateBroadcastTensorShapesMatch(inputTensorInfo,
                                       alphaTensorInfo,
                                       outputTensorInfo,
                                       descriptorName,
                                       "input",
                                       "alpha");
}
void TransposeConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"TransposeConvolution2dQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 4, "output");

    ValidatePointer(m_Weight, descriptorName, "weight");

    const TensorInfo& weightTensorInfo = m_Weight->GetTensorInfo();
    ValidateTensorNumDimensions(weightTensorInfo, descriptorName, 4, "weight");

    ValidateWeightDataType(inputTensorInfo, weightTensorInfo, descriptorName);

    Optional<TensorInfo> optionalBiasTensorInfo;
    if (m_Parameters.m_BiasEnabled)
    {
        ValidatePointer(m_Bias, descriptorName, "bias");

        optionalBiasTensorInfo = MakeOptional<TensorInfo>(m_Bias->GetTensorInfo());
        const TensorInfo& biasTensorInfo = optionalBiasTensorInfo.value();

        ValidateBiasTensorQuantization(biasTensorInfo, inputTensorInfo, weightTensorInfo, descriptorName);
    }

    ValidatePerAxisQuantization(inputTensorInfo,
                                outputTensorInfo,
                                weightTensorInfo,
                                optionalBiasTensorInfo,
                                descriptorName);

    std::vector<DataType> supportedTypes = { /* ... */ };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void TransposeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"TransposeQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const PermutationVector& mapping = m_Parameters.m_DimMappings;

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    ValidateTensorNumDimensions(inputTensorInfo, descriptorName, mapping.GetSize(), "input");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, mapping.GetSize(), "output");

    for (unsigned int i = 0u; i < mapping.GetSize(); ++i)
    {
        if (inputTensorInfo.GetShape()[mapping[i]] != outputTensorInfo.GetShape()[i])
        {
            throw InvalidArgumentException(descriptorName + ": src dimension " + to_string(mapping[i]) +
                                           " (=" + to_string(inputTensorInfo.GetShape()[mapping[i]]) + ") " +
                                           "must match dst dimension " + to_string(i) +
                                           " (=" + to_string(outputTensorInfo.GetShape()[i]) + ")");
        }
    }

    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void QuantizedLstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"QuantizedLstmQueueDescriptor"};

    // Validate number of inputs/outputs
    ValidateNumInputs(workloadInfo, descriptorName, 3);
    ValidateNumOutputs(workloadInfo, descriptorName, 2);

    // Input/output tensor infos
    const TensorInfo& inputInfo         = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& cellStateInInfo   = workloadInfo.m_InputTensorInfos[1];
    const TensorInfo& outputStateInInfo = workloadInfo.m_InputTensorInfos[2];

    const TensorInfo& cellStateOutInfo   = workloadInfo.m_OutputTensorInfos[0];
    const TensorInfo& outputStateOutInfo = workloadInfo.m_OutputTensorInfos[1];

    std::vector<DataType> inputOutputSupportedTypes = { /* ... */ };

    std::vector<DataType> cellStateSupportedTypes = { /* ... */ };

    std::vector<DataType> weightsSupportedTypes = { /* ... */ };

    std::vector<DataType> biasSupportedTypes = { /* ... */ };

    // Validate types of input/output tensors
    ValidateDataTypes(inputInfo, inputOutputSupportedTypes, descriptorName);
    ValidateDataTypes(cellStateInInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateInInfo, inputOutputSupportedTypes, descriptorName);

    ValidateDataTypes(cellStateOutInfo, cellStateSupportedTypes, descriptorName);
    ValidateDataTypes(outputStateOutInfo, inputOutputSupportedTypes, descriptorName);

    // Validate matching types of input/output tensors
    ValidateTensorDataTypesMatch(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorDataTypesMatch(outputStateInInfo, outputStateOutInfo, descriptorName,
                                 "outputStateIn", "outputStateOut");
    ValidateTensorDataTypesMatch(cellStateInInfo, cellStateOutInfo, descriptorName, "cellStateIn", "cellStateOut");

    // Validate matching quantization info for input/output tensors
    ValidateTensorQuantizationSpace(inputInfo, outputStateInInfo, descriptorName, "input", "outputStateIn");
    ValidateTensorQuantizationSpace(inputInfo, outputStateOutInfo, descriptorName, "input", "outputStateOut");
    ValidateTensorQuantizationSpace(cellStateInInfo, cellStateOutInfo, descriptorName,
                                    "cellStateIn", "cellStateOut");

    // Infer number of batches, input size and output size from tensor dimensions
    const uint32_t numBatches = inputInfo.GetShape()[0];
    const uint32_t inputSize  = inputInfo.GetShape()[1];
    const uint32_t outputSize = cellStateInInfo.GetShape()[1];

    // Validate number of dimensions and number of elements for input/output tensors
    ValidateTensorNumDimNumElem(inputInfo, 2, (numBatches * inputSize), descriptorName + " input");
    ValidateTensorNumDimNumElem(cellStateInInfo, 2, (numBatches * outputSize), descriptorName + " cellStateIn");
    ValidateTensorNumDimNumElem(outputStateInInfo, 2, (numBatches * outputSize), descriptorName + " outputStateIn");
    ValidateTensorNumDimNumElem(cellStateOutInfo, 2, (numBatches * outputSize), descriptorName + " cellStateOut");
    ValidateTensorNumDimNumElem(outputStateOutInfo, 2, (numBatches * outputSize), descriptorName + " outputStateOut");
    // Validate number of dimensions and number of elements for weight tensors
    ValidatePointer(m_InputToInputWeights, descriptorName, "InputToInputWeights");
    auto inputToInputWeightsInfo = m_InputToInputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToInputWeightsInfo, 2, (outputSize * inputSize), " InputToInputWeights");

    ValidatePointer(m_InputToForgetWeights, descriptorName, "InputToForgetWeights");
    auto inputToForgetWeightsInfo = m_InputToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToForgetWeightsInfo, 2, (outputSize * inputSize), " InputToForgetWeights");

    ValidatePointer(m_InputToCellWeights, descriptorName, "InputToCellWeights");
    auto inputToCellWeightsInfo = m_InputToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToCellWeightsInfo, 2, (outputSize * inputSize), " InputToCellWeights");

    ValidatePointer(m_InputToOutputWeights, descriptorName, "InputToOutputWeights");
    auto inputToOutputWeightsInfo = m_InputToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputToOutputWeightsInfo, 2, (outputSize * inputSize), " InputToOutputWeights");

    ValidatePointer(m_RecurrentToInputWeights, descriptorName, "RecurrentToInputWeights");
    auto recurrentToInputWeightsInfo = m_RecurrentToInputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToInputWeightsInfo, 2, (outputSize * outputSize),
                                " RecurrentToInputWeights");

    ValidatePointer(m_RecurrentToForgetWeights, descriptorName, "RecurrentToForgetWeights");
    auto recurrentToForgetWeightsInfo = m_RecurrentToForgetWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToForgetWeightsInfo, 2, (outputSize * outputSize),
                                " RecurrentToForgetWeights");

    ValidatePointer(m_RecurrentToCellWeights, descriptorName, "RecurrentToCellWeights");
    auto recurrentToCellWeightsInfo = m_RecurrentToCellWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToCellWeightsInfo, 2, (outputSize * outputSize),
                                " RecurrentToCellWeights");

    ValidatePointer(m_RecurrentToOutputWeights, descriptorName, "RecurrentToOutputWeights");
    auto recurrentToOutputWeightsInfo = m_RecurrentToOutputWeights->GetTensorInfo();
    ValidateTensorNumDimNumElem(recurrentToOutputWeightsInfo, 2, (outputSize * outputSize),
                                " RecurrentToOutputWeights");
    ValidateDataTypes(inputToInputWeightsInfo, weightsSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToForgetWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToCellWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "inputToOutputWeights");

    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToInputWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToForgetWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToCellWeights");
    ValidateTensorDataTypesMatch(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                 "inputToInputWeights", "recurrentToOutputWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToForgetWeightsInfo, descriptorName,
                                    "inputToInputWeights", "inputToForgetWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToCellWeightsInfo, descriptorName,
                                    "inputToInputWeights", "inputToCellWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, inputToOutputWeightsInfo, descriptorName,
                                    "inputToInputWeights", "inputToOutputWeights");

    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToInputWeightsInfo, descriptorName,
                                    "inputToInputWeights", "recurrentToInputWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToForgetWeightsInfo, descriptorName,
                                    "inputToInputWeights", "recurrentToForgetWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToCellWeightsInfo, descriptorName,
                                    "inputToInputWeights", "recurrentToCellWeights");
    ValidateTensorQuantizationSpace(inputToInputWeightsInfo, recurrentToOutputWeightsInfo, descriptorName,
                                    "inputToInputWeights", "recurrentToOutputWeights");
    ValidatePointer(m_InputGateBias, descriptorName, "InputGateBias");
    auto inputGateBiasInfo = m_InputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(inputGateBiasInfo, 1, outputSize, " InputGateBias");

    ValidatePointer(m_ForgetGateBias, descriptorName, "ForgetGateBias");
    auto forgetGateBiasInfo = m_ForgetGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(forgetGateBiasInfo, 1, outputSize, " ForgetGateBias");

    ValidatePointer(m_CellBias, descriptorName, "CellBias");
    auto cellBiasInfo = m_CellBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(cellBiasInfo, 1, outputSize, " CellBias");

    ValidatePointer(m_OutputGateBias, descriptorName, "OutputGateBias");
    auto outputGateBiasInfo = m_OutputGateBias->GetTensorInfo();
    ValidateTensorNumDimNumElem(outputGateBiasInfo, 1, outputSize, " OutputGateBias");
    ValidateDataTypes(inputGateBiasInfo, biasSupportedTypes, descriptorName);

    ValidateTensorDataTypesMatch(inputGateBiasInfo, forgetGateBiasInfo, descriptorName,
                                 "inputGateBias", "forgetGateBias");
    ValidateTensorDataTypesMatch(inputGateBiasInfo, cellBiasInfo, descriptorName,
                                 "inputGateBias", "cellBias");
    ValidateTensorDataTypesMatch(inputGateBiasInfo, outputGateBiasInfo, descriptorName,
                                 "inputGateBias", "outputGateBias");

    // Validate the gate biases' quantization against the input and weight quantization
    ValidateBiasTensorQuantization(inputGateBiasInfo,  inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(forgetGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(cellBiasInfo,       inputInfo, inputToInputWeightsInfo, descriptorName);
    ValidateBiasTensorQuantization(outputGateBiasInfo, inputInfo, inputToInputWeightsInfo, descriptorName);
}
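// Illustrative sketch, not part of the original source: the bias quantization checks above rely on
// the usual rule for quantized bias tensors, i.e. the bias scale should equal the product of the
// input scale and the weight scale, within a small tolerance. The helper below only demonstrates
// that arithmetic; the name CheckBiasScaleMatches is hypothetical.
bool CheckBiasScaleMatches(float inputScale, float weightScale, float biasScale)
{
    constexpr float tolerance = 0.000001f;
    const float expectedScale = inputScale * weightScale; // e.g. 0.05f * 0.004f -> 0.0002f
    const float difference    = biasScale - expectedScale;
    return difference <= tolerance && difference >= -tolerance;
}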
void AbsQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"AbsQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // (declarations of inputTensorInfo/outputTensorInfo elided in this listing)
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // (supported data types elided in this listing)
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"SliceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // (declarations of the tensor infos, shapes and rank elided in this listing)
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, rank, "output");

    if (m_Parameters.m_Begin.size() != rank)
    {
        throw InvalidArgumentException(descriptorName +
            ": Length of begin offset descriptor must equal rank " + std::to_string(rank));
    }

    if (m_Parameters.m_Size.size() != rank)
    {
        throw InvalidArgumentException(descriptorName +
            ": Length of size descriptor must equal rank " + std::to_string(rank));
    }

    // The "size" values must match the output shape in every dimension
    for (unsigned int i = 0u; i < rank; ++i)
    {
        if (m_Parameters.m_Size[i] != outputShape[i])
        {
            // (exception elided in this listing)
        }
    }

    // The slice window (begin + size) must stay inside the input shape
    for (unsigned int i = 0u; i < rank; ++i)
    {
        if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
        {
            throw InvalidArgumentException(descriptorName + // (start of message elided in this listing)
                std::to_string(i) + " exceeds input size.");
        }
    }
}
void DepthToSpaceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"DepthToSpaceQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // (declarations of inputInfo/outputInfo and their shapes elided in this listing)
    ValidateTensorNumDimensions(inputInfo,  descriptorName, 4, "input");
    ValidateTensorNumDimensions(outputInfo, descriptorName, 4, "output");

    std::vector<DataType> supportedTypes =
    {
        // (supported data types elided in this listing)
    };

    ValidateDataTypes(inputInfo,  supportedTypes, descriptorName);
    ValidateDataTypes(outputInfo, supportedTypes, descriptorName);

    ValidateTensorNumElementsMatch(inputInfo, outputInfo, descriptorName, "input", "output");

    if (m_Parameters.m_BlockSize == 0)
    {
        // (exception elided in this listing)
    }

    // (declarations of dimensionIndices, hIndex and cIndex elided in this listing)
    const unsigned int wIndex = dimensionIndices.GetWidthIndex();

    if (outputShape[hIndex] % m_Parameters.m_BlockSize != 0 ||
        outputShape[wIndex] % m_Parameters.m_BlockSize != 0)
    {
        throw InvalidArgumentException(descriptorName + // (start of message elided in this listing)
            "must be divisible by block size.");
    }

    if (inputShape[cIndex] % (m_Parameters.m_BlockSize * m_Parameters.m_BlockSize) != 0)
    {
        throw InvalidArgumentException(descriptorName + // (start of message elided in this listing)
            "must be divisible by the square of block size.");
    }
}
void ComparisonQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ComparisonQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 2);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
    // (remaining arguments and the rest of this function elided in this listing)
}
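// Illustrative sketch, not part of the original source: the kind of rule a broadcast shape check
// enforces for a two-input workload such as Comparison. This is a common formulation (extents must
// match or be 1 in every dimension); it is not a verbatim copy of ValidateBroadcastTensorShapesMatch.
// The function name TryBroadcastShapes is hypothetical.
bool TryBroadcastShapes(const std::vector<unsigned int>& shape0,
                        const std::vector<unsigned int>& shape1,
                        std::vector<unsigned int>& broadcastShape)
{
    if (shape0.size() != shape1.size())
    {
        return false; // this simple sketch assumes both inputs have the same rank
    }
    broadcastShape.resize(shape0.size());
    for (unsigned int i = 0u; i < shape0.size(); ++i)
    {
        if (shape0[i] != shape1[i] && shape0[i] != 1u && shape1[i] != 1u)
        {
            return false; // incompatible extents in this dimension
        }
        broadcastShape[i] = shape0[i] > shape1[i] ? shape0[i] : shape1[i];
    }
    return true;
}
// Example: { 1, 6, 3 } and { 1, 1, 3 } broadcast to { 1, 6, 3 }; { 1, 6, 3 } and { 1, 4, 3 } do not.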
void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ElementwiseUnaryQueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    // (declarations of inputTensorInfo/outputTensorInfo elided in this listing)
    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");

    std::vector<DataType> supportedTypes =
    {
        // (supported data types elided in this listing)
    };

    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}