Release 18.03
[platform/upstream/armnn.git] / src / armnn / backends / WorkloadData.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
4 //
5 #include "WorkloadData.hpp"
6
7 #include "CpuTensorHandle.hpp"
8 #include "WorkloadInfo.hpp"
9
10 #include <algorithm>
11 #include <string>
12 #include <sstream>
13 #include <iomanip>
14
15 #include <boost/format.hpp>
16
17 namespace armnn
18 {
19
20 //---------------------------------------------------------------
21 DataType GetBiasDataType(DataType inputDataType)
22 {
23     switch (inputDataType)
24     {
25         case DataType::Float32:
26             return DataType::Float32;
27         case DataType::QuantisedAsymm8:
28             return DataType::Signed32;
29         default:
30             BOOST_ASSERT_MSG(false, "Invalid input data type");
31             return DataType::Float32;
32     }
33 }
34
35 namespace
36 {
37
//---------------------------------------------------------------
// The Android NDK lacks std::to_string, so provide a stream-based equivalent
// that works for any streamable type.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
47
48 //---------------------------------------------------------------
49 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
50 {
51     if (!ptr)
52     {
53         throw InvalidArgumentException(descName +  ": Invalid null pointer. The " +
54                                       paramName + " parameter must be set.");
55     }
56 }
57
58 //---------------------------------------------------------------
59 void ValidateTensorShapesMatch(const TensorInfo& first,
60                                const TensorInfo& second,
61                                std::string const& descName,
62                                std::string const& firstName,
63                                std::string const& secondName)
64 {
65     if (first.GetShape() != second.GetShape())
66     {
67         throw InvalidArgumentException(descName + ": "
68                                        + firstName + " & " + secondName + " must have identical shapes");
69     }
70 }
71
72 //---------------------------------------------------------------
73 void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
74 {
75     if (workloadInfo.m_InputTensorInfos.size() != 0)
76     {
77         throw InvalidArgumentException(descName +
78             ": Requires no inputs. " +
79             to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
80     }
81 }
82
83 //---------------------------------------------------------------
84 void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName)
85 {
86     if (workloadInfo.m_InputTensorInfos.size() != 1)
87     {
88         throw InvalidArgumentException(descName +
89                                        ": Requires exactly one input. " +
90                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided." );
91     }
92 }
93
94 //---------------------------------------------------------------
95 void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
96 {
97     if (workloadInfo.m_InputTensorInfos.size() != 2)
98     {
99         throw InvalidArgumentException(descName +
100                                        ": Requires exactly two workloadInfo.m_InputTensorInfos. " +
101                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
102     }
103 }
104
105 //---------------------------------------------------------------
106 void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName)
107 {
108     if (workloadInfo.m_OutputTensorInfos.size() != 1)
109     {
110         throw InvalidArgumentException(descName +
111                                        ": Requires exactly one output. " +
112                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
113     }
114 }
115
116 //---------------------------------------------------------------
117 void ValidateTensorNumDimensions(const TensorInfo&  tensor,
118                                  std::string const& descName,
119                                  unsigned int       numDimensions,
120                                  std::string const& tensorName)
121 {
122     if (tensor.GetNumDimensions() != numDimensions)
123     {
124         throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
125             to_string(tensor.GetNumDimensions()) + " dimensions for " +
126             tensorName + " tensor.");
127     }
128 }
129
130 //---------------------------------------------------------------
131 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
132     const std::string& descName, std::string const& tensorName)
133 {
134     if (tensor.GetDataType() != dataType)
135     {
136         throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
137             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
138     }
139 }
140
141 //---------------------------------------------------------------
142 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
143     const TensorInfo& weightsTensorInfo, const std::string& descName)
144 {
145     if (biasTensor.GetQuantizationOffset() != 0)
146     {
147         throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
148             to_string(biasTensor.GetQuantizationOffset()));
149     }
150     const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
151     if (biasTensor.GetQuantizationScale() != expectedScale)
152     {
153         // Print the float values with extra precision to see very small differences
154         std::stringstream msg;
155         msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
156             " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
157             biasTensor.GetQuantizationScale();
158         throw InvalidArgumentException(msg.str());
159     }
160 }
161
162 //---------------------------------------------------------------
163 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
164     unsigned int numExpected,
165     const std::string& descName,
166     const std::string& varName)
167 {
168     if (vec.empty() && numExpected > 0)
169     {
170         throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
171     }
172
173     for (unsigned int i = 0; i < numExpected; ++i)
174     {
175         if (!vec[i])
176         {
177             throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
178         }
179     }
180 }
181
182 //---------------------------------------------------------------
183 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
184                                         const TensorInfo& second,
185                                         const TensorInfo& output,
186                                         std::string const& descName,
187                                         std::string const& firstName,
188                                         std::string const& secondName)
189 {
190     // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
191     // broadcasted.
192     if (first.GetNumDimensions() != second.GetNumDimensions())
193     {
194         throw InvalidArgumentException(descName  + ": Tensors "
195             + firstName + " & " + secondName
196             + " must have the same number of dimensions in order to be broadcasted");
197     }
198     uint32_t numDims = first.GetNumDimensions();
199     std::vector<uint32_t> outputDims(numDims, 0u);
200     for (uint32_t i = 0; i < numDims; i++)
201     {
202         const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
203         const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
204         if (dimsNotEqual && dimsNotOne)
205         {
206             throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
207         }
208         outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
209     }
210     TensorShape broadcastShape =  TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
211     if (broadcastShape != output.GetShape())
212     {
213         throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
214                                        + firstName + " & " + secondName
215                                        + " does not match the output shape");
216     }
217 }
218
219 //---------------------------------------------------------------
220 /// Validates that the output tensor's quantization scale is greater than the product
221 /// of the two input tensors' quantization scales. This is a requirement of the implementation of
222 /// the quantized multiplication.
223 void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
224     const TensorInfo& outputTensorInfo, std::string const& descName,
225     const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
226 {
227     if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
228     {
229         if (outputTensorInfo.GetQuantizationScale() <=
230             inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
231         {
232             std::stringstream msg;
233             msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
234                 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
235             throw InvalidArgumentException(msg.str());
236         }
237     }
238 }
239
240 } //namespace
241
// Validates that this descriptor holds the expected number of non-null input and
// output tensor handles. Inputs are checked first, so input problems are reported
// before output problems.
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
248
//---------------------------------------------------------------
// Validates a memory-copy workload: tensor infos and handles must pair up
// one-to-one, each pair must have matching element counts, and no handle
// may be null.
void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor");

    // Every input tensor info must have a corresponding output tensor info.
    if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
                % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
    }

    // Only element counts are compared, not shapes — a memcpy is a raw copy.
    for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
    {
        if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
            workloadInfo.m_OutputTensorInfos[i].GetNumElements())
        {
            throw InvalidArgumentException(boost::str(
                boost::format("Number of elements for tensor input and output %1% does not match")
                    % i ));
        }
    }

    // The tensor handles must also pair up one-to-one.
    if (m_Inputs.size() != m_Outputs.size())
    {
        throw InvalidArgumentException(boost::str(
            boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
                % m_Inputs.size() % m_Outputs.size()));
    }

    // No handle on either side may be null.
    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
    {
        if (!m_Inputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
        }

        if (!m_Outputs[i])
        {
            throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
        }
    }
}
293
294 //---------------------------------------------------------------
295 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
296 {
297     ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor");
298     ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor");
299     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
300                               workloadInfo.m_OutputTensorInfos[0],
301                               "ActivationQueueDescriptor",
302                               "input",
303                               "output");
304 }
305
306 //---------------------------------------------------------------
307 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
308 {
309     ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor");
310     ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor");
311     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "input");
312     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "output");
313
314     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
315                               workloadInfo.m_OutputTensorInfos[0],
316                               "SoftmaxQueueDescriptor",
317                               "input",
318                               "output");
319 }
320
//---------------------------------------------------------------
// Validates a splitter workload: one input tensor is carved into several output
// views. Each view origin must have the input's dimensionality, and each view
// (origin plus the corresponding output's extent) must lie inside the input.
void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor");

    // A split with no outputs is meaningless.
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
    }

    // Each output tensor info needs a view origin describing where it starts in the input.
    if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "SplitterQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_OutputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the input.
    std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        // Check that the dimensionality of the input is the same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != inputDims)
        {
            throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the input tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the input "
                                           "tensor has " +
                                           to_string(inputDims) + " dimensions.");
        }
        // Each window must fit inside the input: origin + output extent must not
        // exceed the input size in any dimension.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
                workloadInfo.m_InputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the input in that coord.");
            }
        }
    }
}
368
//---------------------------------------------------------------
// Validates a merger (concat) workload: several input tensors are placed into
// one output tensor. Each view origin must have the output's dimensionality,
// and each view (origin plus the corresponding input's extent) must lie inside
// the output.
void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor");

    // Both the handle arrays and the tensor-info arrays must be non-empty.
    if (m_Inputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
    }
    if (m_Outputs.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
    }

    if (workloadInfo.m_InputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
    }
    if (workloadInfo.m_OutputTensorInfos.size() <= 0)
    {
        throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
    }

    // Each input tensor info needs a view origin describing where it lands in the output.
    if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
    {
        throw InvalidArgumentException(
            "MergerQueueDescriptor: Number of split windows "
            "has to match number of workloadInfo.m_InputTensorInfos. "
            "Number of windows: " +
            to_string(m_ViewOrigins.size()) +
            ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
    }

    // The dimensionality of all the windows has to match the dimensionality (not shape) of the output.
    std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
    for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
    {
        // Check that the dimensionality of the output is the same as the split windows.
        ViewOrigin const& e = m_ViewOrigins[w];
        if (e.m_Origin.size() != outputDims)
        {
            throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
                                           "have the same dimensionality as the output tensor. "
                                           "Window origin (index: " +
                                           to_string(w) + ") has " + to_string(e.m_Origin.size()) +
                                           " dimensions, the output "
                                           "tensor has " +
                                           to_string(outputDims) + " dimensions.");
        }
        // Check that the merge windows are within the output tensor.
        for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
        {
            if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
                > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
            {
                throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
                                               "be smaller or equal than the size of the output in that coord.");
            }
        }
    }
}
430
431 //---------------------------------------------------------------
432 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
433 {
434     ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor");
435     ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor");
436     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
437
438     if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
439           workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
440     {
441         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
442     }
443
444     if (m_Weight == nullptr)
445     {
446         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
447     }
448
449     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
450
451     if (m_Parameters.m_BiasEnabled)
452     {
453         if (m_Bias == nullptr)
454         {
455             throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
456                                            "bias value tensor descriptor is missing.");
457         }
458
459         // validate type and quantization values
460         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
461             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
462
463         ValidateTensorDataType(m_Bias->GetTensorInfo(),
464                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
465                                "FullyConnectedQueueDescriptor", "bias");
466
467         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
468     }
469
470     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
471         workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
472 }
473
474 //---------------------------------------------------------------
475 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
476 {
477     ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor");
478     ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor");
479     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
480                               workloadInfo.m_OutputTensorInfos[0],
481                               "NormalizationQueueDescriptor",
482                               "input",
483                               "output");
484 }
485
486 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
487 {
488     ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor");
489     ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor");
490
491     ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
492                                        workloadInfo.m_InputTensorInfos[1],
493                                        workloadInfo.m_OutputTensorInfos[0],
494                                        "AdditionQueueDescriptor",
495                                        "first input",
496                                        "second input");
497
498 }
499
500 //---------------------------------------------------------------
501 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
502 {
503     ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
504     ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");
505
506     ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
507                                        workloadInfo.m_InputTensorInfos[1],
508                                        workloadInfo.m_OutputTensorInfos[0],
509                                        "MultiplicationQueueDescriptor",
510                                        "first input",
511                                        "second input");
512 }
513
514 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
515 {
516     ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor");
517     ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor");
518     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
519                               workloadInfo.m_OutputTensorInfos[0],
520                               "BatchNormalizationQueueDescriptor",
521                               "input",
522                               "output");
523     ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
524     ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
525     ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
526     ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
527
528
529     ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
530     ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
531     ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
532     ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
533
534     ValidateTensorShapesMatch(
535         m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
536     ValidateTensorShapesMatch(
537         m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
538     ValidateTensorShapesMatch(
539         m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
540 }
541
542 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
543 {
544     ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor");
545     ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor");
546
547     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
548     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
549
550     ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
551     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
552     ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
553         "Convolution2dQueueDescriptor", "weight");
554     if (m_Parameters.m_BiasEnabled)
555     {
556         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
557         ValidateTensorDataType(m_Bias->GetTensorInfo(),
558                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
559                                "Convolution2dQueueDescriptor", "bias");
560         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
561             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
562     }
563
564     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
565         workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
566 }
567
// Validates a depthwise 2D convolution workload: 4D input/output, a present 4D
// weight tensor whose channel-multiplier/input-channel product equals the output
// channel count, and — when bias is enabled — a present 1D bias tensor with the
// matching bias data type and quantization parameters.
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    // inputChannels * channelMultiplier should be equal to outputChannels.
    // The multiplier is read from weight dim 0 and the input channel count from
    // weight dim 1; the output channel count comes from output dim 1.
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                                     % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        // The bias must be present, one-dimensional, correctly quantized, and of
        // the bias data type matching the input's data type.
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}
609
610 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
611 {
612     ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor");
613     ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor");
614
615     const PermutationVector& mapping = m_Parameters.m_DimMappings;
616
617     const TensorInfo& input  = workloadInfo.m_InputTensorInfos[0];
618     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
619
620     ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
621     ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
622
623     for (unsigned int i = 0; i < mapping.GetSize(); ++i)
624     {
625         if (input.GetShape()[i] != output.GetShape()[mapping[i]])
626         {
627             throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
628                                                " (=" + to_string(input.GetShape()[i]) + ") " +
629                                                "must match dst dimension " + to_string(mapping[i]) +
630                                                " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
631         }
632     }
633 }
634
635 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
636 {
637     ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor");
638     ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor");
639
640     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
641     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
642 }
643
644 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
645 {
646     ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor");
647     ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor");
648
649     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
650     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
651
652     // Resize bilinear only changes width and height: batch and channel count must match
653     {
654         const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
655         const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
656         if (inputBatchSize != outputBatchSize)
657         {
658             throw InvalidArgumentException(
659                 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
660                     "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
661         }
662     }
663
664     {
665         const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
666         const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
667         if (inputChannelCount != outputChannelCount)
668         {
669             throw InvalidArgumentException(
670                 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
671                     "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
672         }
673     }
674 }
675
676 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
677 {
678     ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor");
679     ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor");
680
681     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
682     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
683     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
684         workloadInfo.m_OutputTensorInfos[0],
685         "FakeQuantizationQueueDescriptor",
686         "input",
687         "output");
688     if (m_Parameters.m_Min > m_Parameters.m_Max)
689     {
690         throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
691     }
692
693 }
694
695 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
696 {
697     ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor");
698     ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor");
699
700     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
701     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
702     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
703         workloadInfo.m_OutputTensorInfos[0],
704         "L2NormalizationQueueDescriptor",
705         "input",
706         "output");
707 }
708
709 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
710 {
711     ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor");
712     ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor");
713
714     if (!m_LayerOutput)
715     {
716         throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
717     }
718
719     ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
720         workloadInfo.m_OutputTensorInfos[0],
721         "ConstantQueueDescriptor",
722         "constant",
723         "output");
724 }
725
726 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
727 {
728     ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor");
729     ValidateSingleOutput(workloadInfo, "ReshapeQueueDescriptor");
730
731     if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
732     {
733         throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
734             to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
735             to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
736     }
737 }
738
739 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
740 {
741     ValidateSingleInput(workloadInfo, "FloorQueueDescriptor");
742     ValidateSingleOutput(workloadInfo, "FlootQueueDescriptor");
743
744     if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
745     {
746         throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
747     }
748 }
749
750 } //namespace armnn