2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
5 #include "WorkloadData.hpp"
7 #include "CpuTensorHandle.hpp"
8 #include "WorkloadInfo.hpp"
15 #include <boost/format.hpp>
20 //---------------------------------------------------------------
// Returns the data type a bias tensor must have for a given input data type:
// Float32 inputs take Float32 biases; QuantisedAsymm8 inputs take Signed32 biases.
21 DataType GetBiasDataType(DataType inputDataType)
23     switch (inputDataType)
25         case DataType::Float32:
26             return DataType::Float32;
27         case DataType::QuantisedAsymm8:
28             return DataType::Signed32;
// Unreachable for supported input types: assert in debug builds, fall back to Float32.
30     BOOST_ASSERT_MSG(false, "Invalid input data type");
31     return DataType::Float32;
38 //---------------------------------------------------------------
// Local replacement for std::to_string, which is unavailable in the Android NDK.
// Streams the value into an ostringstream and returns the resulting string.
39 //android ndk does not support std::to_string function.
41 std::string to_string(T value)
43     std::ostringstream os;
48 //---------------------------------------------------------------
// Throws InvalidArgumentException if ptr is null; descName/paramName are used
// to build the error message identifying the offending descriptor and parameter.
49 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
53         throw InvalidArgumentException(descName + ": Invalid null pointer. The " +
54             paramName + " parameter must be set.");
58 //---------------------------------------------------------------
// Throws InvalidArgumentException unless the two tensors have identical shapes.
// firstName/secondName label the tensors in the error message.
59 void ValidateTensorShapesMatch(const TensorInfo& first,
60     const TensorInfo& second,
61     std::string const& descName,
62     std::string const& firstName,
63     std::string const& secondName)
65     if (first.GetShape() != second.GetShape())
67         throw InvalidArgumentException(descName + ": "
68             + firstName + " & " + secondName + " must have identical shapes");
72 //---------------------------------------------------------------
// Throws if the workload info carries any input tensor infos (some workloads,
// e.g. Constant, must have none).
73 void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
75     if (workloadInfo.m_InputTensorInfos.size() != 0)
77         throw InvalidArgumentException(descName +
78             ": Requires no inputs. " +
79             to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
83 //---------------------------------------------------------------
// Throws unless the workload info carries exactly one input tensor info.
84 void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName)
86     if (workloadInfo.m_InputTensorInfos.size() != 1)
88         throw InvalidArgumentException(descName +
89             ": Requires exactly one input. " +
90             to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided." );
94 //---------------------------------------------------------------
// Throws unless the workload info carries exactly two input tensor infos
// (used by binary operations such as Addition and Multiplication).
95 void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
97     if (workloadInfo.m_InputTensorInfos.size() != 2)
99         throw InvalidArgumentException(descName +
100             ": Requires exactly two workloadInfo.m_InputTensorInfos. " +
101             to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
105 //---------------------------------------------------------------
// Throws unless the workload info carries exactly one output tensor info.
106 void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName)
108     if (workloadInfo.m_OutputTensorInfos.size() != 1)
110         throw InvalidArgumentException(descName +
111             ": Requires exactly one output. " +
112             to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
116 //---------------------------------------------------------------
// Throws unless the tensor has exactly numDimensions dimensions; tensorName
// labels the tensor (e.g. "input", "weight") in the error message.
117 void ValidateTensorNumDimensions(const TensorInfo& tensor,
118     std::string const& descName,
119     unsigned int numDimensions,
120     std::string const& tensorName)
122     if (tensor.GetNumDimensions() != numDimensions)
124         throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
125             to_string(tensor.GetNumDimensions()) + " dimensions for " +
126             tensorName + " tensor.");
130 //---------------------------------------------------------------
// Throws unless the tensor's data type matches the expected dataType.
131 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
132     const std::string& descName, std::string const& tensorName)
134     if (tensor.GetDataType() != dataType)
136         throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
137             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
141 //---------------------------------------------------------------
// Checks the quantization parameters of a bias tensor: the offset must be zero
// and the scale must equal inputScale * weightsScale (required by quantized
// convolution/fully-connected implementations).
142 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
143     const TensorInfo& weightsTensorInfo, const std::string& descName)
145     if (biasTensor.GetQuantizationOffset() != 0)
147         throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
148             to_string(biasTensor.GetQuantizationOffset()));
150     const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
// NOTE: exact float equality is intentional here - the bias scale must be set
// to precisely the product of the input and weight scales.
151     if (biasTensor.GetQuantizationScale() != expectedScale)
153         // Print the float values with extra precision to see very small differences
154         std::stringstream msg;
155         msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
156             " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
157             biasTensor.GetQuantizationScale();
158         throw InvalidArgumentException(msg.str());
162 //---------------------------------------------------------------
// Validates a vector of tensor handles: it must not be empty when tensors are
// expected, and each of the first numExpected entries must be non-null.
163 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
164     unsigned int numExpected,
165     const std::string& descName,
166     const std::string& varName)
168     if (vec.empty() && numExpected > 0)
170         throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
173     for (unsigned int i = 0; i < numExpected; ++i)
// NOTE(review): the per-element null check condition is outside this view -
// presumably `if (!vec[i])` guards this throw; confirm against the full file.
177             throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
182 //---------------------------------------------------------------
// Validates that `first` and `second` can be broadcast together (same rank;
// each dimension pair either equal or one of them 1) and that the broadcast
// result shape matches `output`'s shape.
183 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
184     const TensorInfo& second,
185     const TensorInfo& output,
186     std::string const& descName,
187     std::string const& firstName,
188     std::string const& secondName)
190     // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
192     if (first.GetNumDimensions() != second.GetNumDimensions())
194         throw InvalidArgumentException(descName  + ": Tensors "
195             + firstName + " & " + secondName
196             + " must have the same number of dimensions in order to be broadcasted");
198     uint32_t numDims = first.GetNumDimensions();
199     std::vector<uint32_t> outputDims(numDims, 0u);
200     for (uint32_t i = 0; i < numDims; i++)
202         const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
203         const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
204         if (dimsNotEqual && dimsNotOne)
206             throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
// The broadcast result takes the larger extent of each dimension pair.
208         outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
210     TensorShape broadcastShape =  TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
211     if (broadcastShape != output.GetShape())
213         throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
214             + firstName + " & " + secondName
215             + " does not match the output shape");
219 //---------------------------------------------------------------
220 /// Validates that the output tensor's quantization scale is greater than the product
221 /// of the two input tensors' quantization scales. This is a requirement of the implementation of
222 /// the quantized multiplication.
223 void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
224     const TensorInfo& outputTensorInfo, std::string const& descName,
225     const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
// Only applies to quantized (QuantisedAsymm8) outputs; other data types pass through.
227     if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
229         if (outputTensorInfo.GetQuantizationScale() <=
230             inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
232             std::stringstream msg;
233             msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
234                 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
235             throw InvalidArgumentException(msg.str());
// Validates that this descriptor's input and output tensor-handle arrays each
// contain the expected number of non-null handles.
242 void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
243     unsigned int numExpectedIn, unsigned int numExpectedOut) const
245     ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
246     ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
249 //---------------------------------------------------------------
// Validates a memory-copy workload: matching input/output counts, matching
// element counts per tensor pair, and non-null input/output handles.
250 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
252     ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor");
253     ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor");
255     if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
257         throw InvalidArgumentException(boost::str(
258             boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
259                 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
// Each input/output pair must carry the same number of elements (shapes may differ).
262     for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
264         if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
265             workloadInfo.m_OutputTensorInfos[i].GetNumElements())
267             throw InvalidArgumentException(boost::str(
268                 boost::format("Number of elements for tensor input and output %1% does not match")
273     if (m_Inputs.size() != m_Outputs.size())
275         throw InvalidArgumentException(boost::str(
276             boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
277                 % m_Inputs.size() % m_Outputs.size()));
// NOTE(review): the null checks on m_Inputs[i]/m_Outputs[i] guarding the two
// throws below are outside this view - confirm against the full file.
280     for (unsigned int i = 0; i < m_Inputs.size(); ++i)
284             throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
289             throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
294 //---------------------------------------------------------------
// Validates an activation workload: one input, one output, identical shapes.
295 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
297     ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor");
298     ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor");
299     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
300         workloadInfo.m_OutputTensorInfos[0],
301         "ActivationQueueDescriptor",
306 //---------------------------------------------------------------
// Validates a softmax workload: one 2D input, one 2D output, identical shapes.
307 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
309     ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor");
310     ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor");
311     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "input");
312     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "output");
314     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
315         workloadInfo.m_OutputTensorInfos[0],
316         "SoftmaxQueueDescriptor",
321 //---------------------------------------------------------------
// Validates a splitter workload: one input, at least one output, one view
// origin per output, each window matching the input's dimensionality and
// lying fully within the input tensor.
322 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
324     ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor");
326     if (workloadInfo.m_OutputTensorInfos.size() <= 0)
328         throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
331     if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
333         throw InvalidArgumentException(
334             "SplitterQueueDescriptor: Number of split windows "
335             "has to match number of workloadInfo.m_OutputTensorInfos. "
336             "Number of windows: " +
337             to_string(m_ViewOrigins.size()) +
338             ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
341     //the dimensionality of all the windows has to match the dimensionality (not shape) of the input
342     std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
343     for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
345         //check that the dimensionality of input is same as the split windows
346         ViewOrigin const& e = m_ViewOrigins[w];
347         if (e.m_Origin.size() != inputDims)
349             throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
350                 "have the same dimensionality as the input tensor. "
351                 "Window origin (index: " +
352                 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
353                 " dimensions, the input "
355                 to_string(inputDims) + " dimensions.");
// Each window (origin + output extent) must fit inside the input along every axis.
357         for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
359             if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
360                 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
362                 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
363                     "be smaller or equal than the size of the input in that coord.");
369 //---------------------------------------------------------------
// Validates a merger (concat) workload: one output, at least one input, one
// view origin per input, each window matching the output's dimensionality and
// lying fully within the output tensor. Mirrors SplitterQueueDescriptor with
// the roles of input and output swapped.
370 void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
372     ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor");
374     if (m_Inputs.size() <= 0)
376         throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
378     if (m_Outputs.size() <= 0)
380         throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
383     if (workloadInfo.m_InputTensorInfos.size() <= 0)
385         throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
387     if (workloadInfo.m_OutputTensorInfos.size() <= 0)
389         throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
392     if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
394         throw InvalidArgumentException(
395             "MergerQueueDescriptor: Number of split windows "
396             "has to match number of workloadInfo.m_InputTensorInfos. "
397             "Number of windows: " +
398             to_string(m_ViewOrigins.size()) +
399             ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
402     //the dimensionality of all the windows has to match the dimensionality (not shape) of the output
403     std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
404     for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
406         //check that the dimensionality of output is same as the split windows
407         ViewOrigin const& e = m_ViewOrigins[w];
408         if (e.m_Origin.size() != outputDims)
410             throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
411                 "have the same dimensionality as the output tensor. "
412                 "Window origin (index: " +
413                 to_string(w) + ") has " + to_string(e.m_Origin.size()) +
414                 " dimensions, the output "
416                 to_string(outputDims) + " dimensions.");
418         //check that the merge windows are within the output tensor
419         for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
421             if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
422                 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
424                 throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
425                     "be smaller or equal than the size of the output in that coord.");
431 //---------------------------------------------------------------
// Validates a fully-connected workload: one input (2D or 4D), one 2D output,
// a 2D weight tensor, and - when bias is enabled - a correctly typed and
// quantized 1D bias tensor. Also checks the quantization multiplier constraint.
432 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
434     ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor");
435     ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor");
436     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
438     if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
439           workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
441         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
444     if (m_Weight == nullptr)
446         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
449     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
451     if (m_Parameters.m_BiasEnabled)
453         if (m_Bias == nullptr)
455             throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
456                 "bias value tensor descriptor is missing.");
459         // validate type and quantization values
460         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
461             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
463         ValidateTensorDataType(m_Bias->GetTensorInfo(),
464                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
465                                "FullyConnectedQueueDescriptor", "bias");
467         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
470     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
471         workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
474 //---------------------------------------------------------------
// Validates a normalization workload: one input, one output, identical shapes.
475 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
477     ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor");
478     ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor");
479     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
480         workloadInfo.m_OutputTensorInfos[0],
481         "NormalizationQueueDescriptor",
// Validates an addition workload: two inputs, one output, and input shapes
// that broadcast to the output shape.
486 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
488     ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor");
489     ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor");
491     ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
492         workloadInfo.m_InputTensorInfos[1],
493         workloadInfo.m_OutputTensorInfos[0],
494         "AdditionQueueDescriptor",
500 //---------------------------------------------------------------
// Validates a multiplication workload: two inputs, one output, and input
// shapes that broadcast to the output shape.
501 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
503     ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
504     ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");
506     ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
507         workloadInfo.m_InputTensorInfos[1],
508         workloadInfo.m_OutputTensorInfos[0],
509         "MultiplicationQueueDescriptor",
// Validates a batch-normalization workload: one input, one output with the
// same shape, and non-null 1D mean/variance/beta/gamma tensors that all share
// the mean tensor's shape.
514 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
516     ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor");
517     ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor");
518     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
519         workloadInfo.m_OutputTensorInfos[0],
520         "BatchNormalizationQueueDescriptor",
// The four parameter tensors must be supplied before they are dereferenced below.
523     ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
524     ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
525     ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
526     ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
529     ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
530     ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
531     ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
532     ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
// All parameter tensors are compared against mean, so they all end up the same shape.
534     ValidateTensorShapesMatch(
535         m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
536     ValidateTensorShapesMatch(
537         m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
538     ValidateTensorShapesMatch(
539         m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
// Validates a 2D convolution workload: one 4D input, one 4D output, a non-null
// 4D weight tensor of matching data type, and - when bias is enabled - a
// correctly typed and quantized 1D bias tensor. Also checks the quantization
// multiplier constraint for quantized outputs.
542 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
544     ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor");
545     ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor");
547     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
548     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
550     ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
551     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
552     ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
553         "Convolution2dQueueDescriptor", "weight");
554     if (m_Parameters.m_BiasEnabled)
        // Bias pointer must be checked before dereferencing it below; this matches
        // the DepthwiseConvolution2dQueueDescriptor::Validate behaviour and avoids a
        // null dereference when bias is enabled but no bias tensor was supplied.
        ValidatePointer(m_Bias, "Convolution2dQueueDescriptor", "bias");
556         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
557         ValidateTensorDataType(m_Bias->GetTensorInfo(),
558                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
559                                "Convolution2dQueueDescriptor", "bias");
560         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
561             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
564     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
565         workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
// Validates a depthwise 2D convolution workload: one 4D input, one 4D output,
// a non-null 4D weight tensor whose channelMultiplier * inputChannels equals
// the output channel count, and - when bias is enabled - a non-null, correctly
// typed and quantized 1D bias tensor.
568 void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
570     ValidateSingleInput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
571     ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
573     ValidateTensorNumDimensions(
574         workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
575     ValidateTensorNumDimensions(
576         workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");
578     ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
579     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");
581     //inputChannels * channelMultiplier should be equal to outputChannels
// Weight layout here: dimension 0 is the channel multiplier, dimension 1 the
// input channel count; output shape dimension 1 is the output channel count.
582     const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
583     const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
584     const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
585     if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
587         throw InvalidArgumentException(
588             boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
589                                      "equal to input_channels (provided %2%) multiplied by channel_multiplier "
591                        % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
594     if (m_Parameters.m_BiasEnabled)
596         ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
597         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
598         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
599             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");
601         ValidateTensorDataType(m_Bias->GetTensorInfo(),
602                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
603                                "DepthwiseConvolution2dQueueDescriptor", "bias");
606     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
607         workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
// Validates a permute workload: input and output ranks must both equal the
// permutation vector's size, and each input dimension i must match output
// dimension mapping[i].
610 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
612     ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor");
613     ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor");
615     const PermutationVector& mapping = m_Parameters.m_DimMappings;
617     const TensorInfo& input  = workloadInfo.m_InputTensorInfos[0];
618     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
620     ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
621     ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
623     for (unsigned int i = 0; i < mapping.GetSize(); ++i)
625         if (input.GetShape()[i] != output.GetShape()[mapping[i]])
627             throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
628                 " (=" + to_string(input.GetShape()[i]) + ") " +
629                 "must match dst dimension " + to_string(mapping[i]) +
630                 " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
// Validates a 2D pooling workload: one 4D input and one 4D output.
635 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
637     ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor");
638     ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor");
640     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
641     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
// Validates a bilinear-resize workload: one 4D input, one 4D output, with
// matching batch size (dim 0) and channel count (dim 1) - only width/height
// may change.
644 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
646     ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor");
647     ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor");
649     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
650     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
652     // Resize bilinear only changes width and height: batch and channel count must match
654     const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
655     const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
656     if (inputBatchSize != outputBatchSize)
658         throw InvalidArgumentException(
659             boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
660                 "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
665     const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
666     const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
667     if (inputChannelCount != outputChannelCount)
669         throw InvalidArgumentException(
670             boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
671                 "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
// Validates a fake-quantization workload: one 2D input, one 2D output with the
// same shape, and a quantization range where min <= max.
676 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
678     ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor");
679     ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor");
681     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
682     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
683     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
684         workloadInfo.m_OutputTensorInfos[0],
685         "FakeQuantizationQueueDescriptor",
688     if (m_Parameters.m_Min > m_Parameters.m_Max)
690         throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
// Validates an L2-normalization workload: one 4D input and one 4D output with
// identical shapes.
695 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
697     ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor");
698     ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor");
700     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
701     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
702     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
703         workloadInfo.m_OutputTensorInfos[0],
704         "L2NormalizationQueueDescriptor",
// Validates a constant workload: no inputs, one output, a supplied constant
// tensor (m_LayerOutput) whose shape matches the output tensor.
709 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
711     ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor");
712     ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor");
// NOTE(review): the null check on m_LayerOutput guarding this throw is outside
// this view - presumably `if (!m_LayerOutput)`; confirm against the full file.
716         throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
719     ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
720         workloadInfo.m_OutputTensorInfos[0],
721         "ConstantQueueDescriptor",
// Validates a reshape workload: one input and one output carrying the same
// total number of elements (shapes may differ).
726 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
728     ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor");
729     ValidateSingleOutput(workloadInfo, "ReshapeQueueDescriptor");
731     if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
733         throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
734             to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
735             to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
// Validates a floor workload: one input, one output, with fully matching
// tensor infos (shape, data type, quantization).
739 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
741     ValidateSingleInput(workloadInfo, "FloorQueueDescriptor");
// Fixed typo: the descriptor name string previously read "FlootQueueDescriptor",
// which would have produced a misleading error message naming a non-existent descriptor.
742     ValidateSingleOutput(workloadInfo, "FloorQueueDescriptor");
744     if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
746         throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");