Release 18.02
[platform/upstream/armnn.git] / src / armnn / backends / WorkloadData.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
4 //
5 #include "WorkloadData.hpp"
6
7 #include "CpuTensorHandle.hpp"
8 #include "WorkloadInfo.hpp"
9
10 #include <algorithm>
11 #include <string>
12 #include <sstream>
13 #include <iomanip>
14
15 #include <boost/format.hpp>
16
17 namespace armnn
18 {
19
20 //---------------------------------------------------------------
21 DataType GetBiasDataType(DataType inputDataType)
22 {
23     switch (inputDataType)
24     {
25         case DataType::Float32:
26             return DataType::Float32;
27         case DataType::QuantisedAsymm8:
28             return DataType::Signed32;
29         default:
30             BOOST_ASSERT_MSG(false, "Invalid input data type");
31             return DataType::Float32;
32     }
33 }
34
35 namespace
36 {
37
38 //---------------------------------------------------------------
// The Android NDK does not provide std::to_string, so supply a minimal
// stream-based substitute that renders any streamable value as a string.
template <typename T>
std::string to_string(T value)
{
    std::ostringstream stream;
    stream << value;
    return stream.str();
}
47
48 //---------------------------------------------------------------
49 void ValidatePointer(const void* ptr, std::string const& descName, std::string const& paramName)
50 {
51     if (!ptr)
52     {
53         throw InvalidArgumentException(descName +  ": Invalid null pointer. The " +
54                                       paramName + " parameter must be set.");
55     }
56 }
57
58 //---------------------------------------------------------------
59 void ValidateTensorShapesMatch(const TensorInfo& first,
60                                const TensorInfo& second,
61                                std::string const& descName,
62                                std::string const& firstName,
63                                std::string const& secondName)
64 {
65     if (first.GetShape() != second.GetShape())
66     {
67         throw InvalidArgumentException(descName + ": "
68                                        + firstName + " & " + secondName + " must have identical shapes");
69     }
70 }
71
72 //---------------------------------------------------------------
73 void ValidateNoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
74 {
75     if (workloadInfo.m_InputTensorInfos.size() != 0)
76     {
77         throw InvalidArgumentException(descName +
78             ": Requires no inputs. " +
79             to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided.");
80     }
81 }
82
83 //---------------------------------------------------------------
84 void ValidateSingleInput(const WorkloadInfo& workloadInfo, std::string const& descName)
85 {
86     if (workloadInfo.m_InputTensorInfos.size() != 1)
87     {
88         throw InvalidArgumentException(descName +
89                                        ": Requires exactly one input. " +
90                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " has been provided." );
91     }
92 }
93
94 //---------------------------------------------------------------
95 void ValidateTwoInputs(const WorkloadInfo& workloadInfo, std::string const& descName)
96 {
97     if (workloadInfo.m_InputTensorInfos.size() != 2)
98     {
99         throw InvalidArgumentException(descName +
100                                        ": Requires exactly two workloadInfo.m_InputTensorInfos. " +
101                                        to_string(workloadInfo.m_InputTensorInfos.size()) + " have been provided.");
102     }
103 }
104
105 //---------------------------------------------------------------
106 void ValidateSingleOutput(const WorkloadInfo& workloadInfo, std::string const& descName)
107 {
108     if (workloadInfo.m_OutputTensorInfos.size() != 1)
109     {
110         throw InvalidArgumentException(descName +
111                                        ": Requires exactly one output. " +
112                                        to_string(workloadInfo.m_OutputTensorInfos.size()) + " has been provided.");
113     }
114 }
115
116 //---------------------------------------------------------------
117 void ValidateTensorNumDimensions(const TensorInfo&  tensor,
118                                  std::string const& descName,
119                                  unsigned int       numDimensions,
120                                  std::string const& tensorName)
121 {
122     if (tensor.GetNumDimensions() != numDimensions)
123     {
124         throw InvalidArgumentException(descName + ": Expected " + to_string(numDimensions) + " but got " +
125             to_string(tensor.GetNumDimensions()) + " dimensions for " +
126             tensorName + " tensor.");
127     }
128 }
129
130 //---------------------------------------------------------------
131 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
132     const std::string& descName, std::string const& tensorName)
133 {
134     if (tensor.GetDataType() != dataType)
135     {
136         throw InvalidArgumentException(descName + ": Expected data type " + GetDataTypeName(dataType) + " but got " +
137             GetDataTypeName(tensor.GetDataType()) + " for " + tensorName + " tensor.");
138     }
139 }
140
141 //---------------------------------------------------------------
142 void ValidateBiasTensorQuantization(const TensorInfo& biasTensor, const TensorInfo& inputTensorInfo,
143     const TensorInfo& weightsTensorInfo, const std::string& descName)
144 {
145     if (biasTensor.GetQuantizationOffset() != 0)
146     {
147         throw InvalidArgumentException(descName + ": Expected zero quantization offset for bias tensor but got " +
148             to_string(biasTensor.GetQuantizationOffset()));
149     }
150     const float expectedScale = inputTensorInfo.GetQuantizationScale() * weightsTensorInfo.GetQuantizationScale();
151     if (biasTensor.GetQuantizationScale() != expectedScale)
152     {
153         // Print the float values with extra precision to see very small differences
154         std::stringstream msg;
155         msg << std::setprecision(10) << descName << ": Expected " << expectedScale <<
156             " quantization scale for bias tensor (the product of the input and weight scales), but got " <<
157             biasTensor.GetQuantizationScale();
158         throw InvalidArgumentException(msg.str());
159     }
160 }
161
162 //---------------------------------------------------------------
163 void ValidateTensors(const std::vector<ITensorHandle*>& vec,
164     unsigned int numExpected,
165     const std::string& descName,
166     const std::string& varName)
167 {
168     if (vec.empty() && numExpected > 0)
169     {
170         throw InvalidArgumentException(descName + ": Invalid empty " + varName + " array.");
171     }
172
173     for (unsigned int i = 0; i < numExpected; ++i)
174     {
175         if (!vec[i])
176         {
177             throw InvalidArgumentException(descName + ": Invalid NULL for " + varName + to_string(i));
178         }
179     }
180 }
181
182 //---------------------------------------------------------------
183 void ValidateBroadcastTensorShapesMatch(const TensorInfo& first,
184                                         const TensorInfo& second,
185                                         const TensorInfo& output,
186                                         std::string const& descName,
187                                         std::string const& firstName,
188                                         std::string const& secondName)
189 {
190     // Tensors must have the same number of dimensions in order to be explicit about which dimensions will get
191     // broadcasted.
192     if (first.GetNumDimensions() != second.GetNumDimensions())
193     {
194         throw InvalidArgumentException(descName  + ": Tensors "
195             + firstName + " & " + secondName
196             + " must have the same number of dimensions in order to be broadcasted");
197     }
198     uint32_t numDims = first.GetNumDimensions();
199     std::vector<uint32_t> outputDims(numDims, 0u);
200     for (uint32_t i = 0; i < numDims; i++)
201     {
202         const bool dimsNotEqual = first.GetShape()[i] != second.GetShape()[i];
203         const bool dimsNotOne = (first.GetShape()[i] != 1) && (second.GetShape()[i] != 1);
204         if (dimsNotEqual && dimsNotOne)
205         {
206             throw InvalidArgumentException("Broadcasting is not possible for incompatible shapes");
207         }
208         outputDims[i] = std::max(first.GetShape()[i], second.GetShape()[i]);
209     }
210     TensorShape broadcastShape =  TensorShape(boost::numeric_cast<unsigned int>(outputDims.size()), outputDims.data());
211     if (broadcastShape != output.GetShape())
212     {
213         throw InvalidArgumentException(descName + ": The tensor shape resulting from adding "
214                                        + firstName + " & " + secondName
215                                        + " does not match the output shape");
216     }
217 }
218
219 //---------------------------------------------------------------
220 /// Validates that the output tensor's quantization scale is greater than the product
221 /// of the two input tensors' quantization scales. This is a requirement of the implementation of
222 /// the quantized multiplication.
223 void ValidateTensorQuantizationMultiplier(const TensorInfo& inputTensor1, const TensorInfo& inputTensor2,
224     const TensorInfo& outputTensorInfo, std::string const& descName,
225     const std::string& inputTensor1Name, const std::string& inputTensor2Name, const std::string& outputTensorName)
226 {
227     if (outputTensorInfo.GetDataType() == DataType::QuantisedAsymm8)
228     {
229         if (outputTensorInfo.GetQuantizationScale() <=
230             inputTensor1.GetQuantizationScale() * inputTensor2.GetQuantizationScale())
231         {
232             std::stringstream msg;
233             msg << descName << ": Quantization scale of " << outputTensorName << " is not greater than " <<
234                 "the product of the " << inputTensor1Name << " and " << inputTensor2Name << " tensors";
235             throw InvalidArgumentException(msg.str());
236         }
237     }
238 }
239
240 } //namespace
241
// Common validation for all queue descriptors: checks that the m_Inputs and
// m_Outputs handle arrays each contain the expected number of non-null
// entries. Inputs are validated first, so an input error is reported before
// any output error. Throws InvalidArgumentException (via ValidateTensors).
void QueueDescriptor::ValidateInputsOutputs(const std::string& descName,
    unsigned int numExpectedIn, unsigned int numExpectedOut) const
{
    ValidateTensors(m_Inputs, numExpectedIn, descName, "input");
    ValidateTensors(m_Outputs, numExpectedOut, descName, "output");
}
248
249 //---------------------------------------------------------------
250 void MemCopyQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
251 {
252     ValidateSingleInput(workloadInfo, "MemCopyQueueDescriptor");
253     ValidateSingleOutput(workloadInfo, "MemCopyQueueDescriptor");
254
255     if (workloadInfo.m_InputTensorInfos.size() != workloadInfo.m_OutputTensorInfos.size())
256     {
257         throw InvalidArgumentException(boost::str(
258             boost::format("Number of input infos (%1%) does not match the number of output infos (%2%)")
259                 % workloadInfo.m_InputTensorInfos.size() % workloadInfo.m_OutputTensorInfos.size()));
260     }
261
262     for (std::size_t i = 0; i < workloadInfo.m_InputTensorInfos.size(); ++i)
263     {
264         if (workloadInfo.m_InputTensorInfos[i].GetNumElements() !=
265             workloadInfo.m_OutputTensorInfos[i].GetNumElements())
266         {
267             throw InvalidArgumentException(boost::str(
268                 boost::format("Number of elements for tensor input and output %1% does not match")
269                     % i ));
270         }
271     }
272
273     if (m_Inputs.size() != m_Outputs.size())
274     {
275         throw InvalidArgumentException(boost::str(
276             boost::format("Number of inputs (%1%) does not match the number of outputs (%2%)")
277                 % m_Inputs.size() % m_Outputs.size()));
278     }
279
280     for (unsigned int i = 0; i < m_Inputs.size(); ++i)
281     {
282         if (!m_Inputs[i])
283         {
284             throw InvalidArgumentException(boost::str(boost::format("Invalid null input %1%") % i));
285         }
286
287         if (!m_Outputs[i])
288         {
289             throw InvalidArgumentException(boost::str(boost::format("Invalid null output %1%") % i));
290         }
291     }
292 }
293
294 //---------------------------------------------------------------
295 void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
296 {
297     ValidateSingleInput(workloadInfo, "ActivationQueueDescriptor");
298     ValidateSingleOutput(workloadInfo, "ActivationQueueDescriptor");
299     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
300                               workloadInfo.m_OutputTensorInfos[0],
301                               "ActivationQueueDescriptor",
302                               "input",
303                               "output");
304 }
305
306 //---------------------------------------------------------------
307 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
308 {
309     ValidateSingleInput(workloadInfo, "SoftmaxQueueDescriptor");
310     ValidateSingleOutput(workloadInfo, "SoftmaxQueueDescriptor");
311     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "input");
312     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "SoftmaxQueueDescriptor", 2, "output");
313
314     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
315                               workloadInfo.m_OutputTensorInfos[0],
316                               "SoftmaxQueueDescriptor",
317                               "input",
318                               "output");
319 }
320
321 //---------------------------------------------------------------
322 void SplitterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
323 {
324     ValidateSingleInput(workloadInfo, "SplitterQueueDescriptor");
325
326     if (workloadInfo.m_OutputTensorInfos.size() <= 0)
327     {
328         throw InvalidArgumentException("SplitterQueueDescriptor: At least one output needs to be provided.");
329     }
330
331     if (workloadInfo.m_OutputTensorInfos.size() != m_ViewOrigins.size())
332     {
333         throw InvalidArgumentException(
334             "SplitterQueueDescriptor: Number of split windows "
335             "has to match number of workloadInfo.m_OutputTensorInfos. "
336             "Number of windows: " +
337             to_string(m_ViewOrigins.size()) +
338             ". Number of workloadInfo.m_OutputTensorInfos: " + to_string(workloadInfo.m_OutputTensorInfos.size()));
339     }
340
341     //the dimensionality of all the windows has to match the dimensionality (not shape) of the input
342     std::size_t inputDims = workloadInfo.m_InputTensorInfos[0].GetNumDimensions();
343     for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
344     {
345         //check that the dimensionality of input is same as the split windows
346         ViewOrigin const& e = m_ViewOrigins[w];
347         if (e.m_Origin.size() != inputDims)
348         {
349             throw InvalidArgumentException("SplitterQueueDescriptor: Window origin have to "
350                                            "have the same dimensionality as the input tensor. "
351                                            "Window origin (index: " +
352                                            to_string(w) + ") has " + to_string(e.m_Origin.size()) +
353                                            " dimensions, the input "
354                                            "tensor has " +
355                                            to_string(inputDims) + " dimensions.");
356         }
357         for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
358         {
359             if (e.m_Origin[i] + workloadInfo.m_OutputTensorInfos[w].GetShape()[i] >
360                 workloadInfo.m_InputTensorInfos[0].GetShape()[i])
361             {
362                 throw InvalidArgumentException("SplitterQueueDescriptor: Window extent coordinates have to "
363                                                "be smaller or equal than the size of the input in that coord.");
364             }
365         }
366     }
367 }
368
369 //---------------------------------------------------------------
370 void MergerQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
371 {
372     ValidateSingleOutput(workloadInfo, "MergerQueueDescriptor");
373
374     if (m_Inputs.size() <= 0)
375     {
376         throw InvalidArgumentException("MergerQueueDescriptor: At least one input needs to be provided.");
377     }
378     if (m_Outputs.size() <= 0)
379     {
380         throw InvalidArgumentException("MergerQueueDescriptor: At least one output needs to be provided.");
381     }
382
383     if (workloadInfo.m_InputTensorInfos.size() <= 0)
384     {
385         throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo input needs to be provided.");
386     }
387     if (workloadInfo.m_OutputTensorInfos.size() <= 0)
388     {
389         throw InvalidArgumentException("MergerQueueDescriptor: At least one TensorInfo output needs to be provided.");
390     }
391
392     if (workloadInfo.m_InputTensorInfos.size() != m_ViewOrigins.size())
393     {
394         throw InvalidArgumentException(
395             "MergerQueueDescriptor: Number of split windows "
396             "has to match number of workloadInfo.m_InputTensorInfos. "
397             "Number of windows: " +
398             to_string(m_ViewOrigins.size()) +
399             ". Number of workloadInfo.m_InputTensorInfos: " + to_string(workloadInfo.m_InputTensorInfos.size()));
400     }
401
402     //the dimensionality of all the windows has to match the dimensionality (not shape) of the output
403     std::size_t outputDims = workloadInfo.m_OutputTensorInfos[0].GetNumDimensions();
404     for(unsigned int w = 0; w < m_ViewOrigins.size(); ++w )
405     {
406         //check that the dimensionality of output is same as the split windows
407         ViewOrigin const& e = m_ViewOrigins[w];
408         if (e.m_Origin.size() != outputDims)
409         {
410             throw InvalidArgumentException("MergerQueueDescriptor: Window origin have to "
411                                            "have the same dimensionality as the output tensor. "
412                                            "Window origin (index: " +
413                                            to_string(w) + ") has " + to_string(e.m_Origin.size()) +
414                                            " dimensions, the output "
415                                            "tensor has " +
416                                            to_string(outputDims) + " dimensions.");
417         }
418         //check that the merge windows are within the output tensor
419         for (unsigned int i = 0; i < e.m_Origin.size(); ++i)
420         {
421             if (e.m_Origin[i] + workloadInfo.m_InputTensorInfos[w].GetShape()[i]
422                 > workloadInfo.m_OutputTensorInfos[0].GetShape()[i])
423             {
424                 throw InvalidArgumentException("MergerQueueDescriptor: Window extent coordinates have to "
425                                                "be smaller or equal than the size of the output in that coord.");
426             }
427         }
428     }
429 }
430
431 //---------------------------------------------------------------
432 void FullyConnectedQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
433 {
434     ValidateSingleInput(workloadInfo, "FullyConnectedQueueDescriptor");
435     ValidateSingleOutput(workloadInfo, "FullyConnectedQueueDescriptor");
436     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", 2, "output");
437
438     if (!(workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 2 ||
439           workloadInfo.m_InputTensorInfos[0].GetNumDimensions() == 4))
440     {
441         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Input tensor must have 2 or 4 dimensions.");
442     }
443
444     if (m_Weight == nullptr)
445     {
446         throw InvalidArgumentException("FullyConnectedQueueDescriptor: Weight tensor descriptor is missing.");
447     }
448
449     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor", 2, "weight");
450
451     if (m_Parameters.m_BiasEnabled)
452     {
453         if (m_Bias == nullptr)
454         {
455             throw InvalidArgumentException("FullyConnectedQueueDescriptor: Bias is enabled but "
456                                            "bias value tensor descriptor is missing.");
457         }
458
459         // validate type and quantization values
460         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
461             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "FullyConnectedQueueDescriptor");
462
463         ValidateTensorDataType(m_Bias->GetTensorInfo(),
464                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
465                                "FullyConnectedQueueDescriptor", "bias");
466
467         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "FullyConnectedQueueDescriptor", 1, "bias");
468     }
469
470     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
471         workloadInfo.m_OutputTensorInfos[0], "FullyConnectedQueueDescriptor", "input", "weights", "output");
472 }
473
474 //---------------------------------------------------------------
475 void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
476 {
477     ValidateSingleInput(workloadInfo, "NormalizationQueueDescriptor");
478     ValidateSingleOutput(workloadInfo, "NormalizationQueueDescriptor");
479     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
480                               workloadInfo.m_OutputTensorInfos[0],
481                               "NormalizationQueueDescriptor",
482                               "input",
483                               "output");
484 }
485
486 void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
487 {
488     ValidateTwoInputs(workloadInfo, "AdditionQueueDescriptor");
489     ValidateSingleOutput(workloadInfo, "AdditionQueueDescriptor");
490
491     ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
492                                        workloadInfo.m_InputTensorInfos[1],
493                                        workloadInfo.m_OutputTensorInfos[0],
494                                        "AdditionQueueDescriptor",
495                                        "first input",
496                                        "second input");
497
498 }
499
500 //---------------------------------------------------------------
501 void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
502 {
503     ValidateTwoInputs(workloadInfo, "MultiplicationQueueDescriptor");
504     ValidateSingleOutput(workloadInfo, "MultiplicationQueueDescriptor");
505     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
506                               workloadInfo.m_InputTensorInfos[1],
507                               "MultiplicationQueueDescriptor",
508                               "first input",
509                               "second input");
510     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
511                               workloadInfo.m_OutputTensorInfos[0],
512                               "MultiplicationQueueDescriptor",
513                               "input",
514                               "output");
515 }
516
517 void BatchNormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
518 {
519     ValidateSingleInput(workloadInfo, "BatchNormalizationQueueDescriptor");
520     ValidateSingleOutput(workloadInfo, "BatchNormalizationQueueDescriptor");
521     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
522                               workloadInfo.m_OutputTensorInfos[0],
523                               "BatchNormalizationQueueDescriptor",
524                               "input",
525                               "output");
526     ValidatePointer(m_Mean, "BatchNormalizationQueueDescriptor", "mean");
527     ValidatePointer(m_Variance, "BatchNormalizationQueueDescriptor", "variance");
528     ValidatePointer(m_Beta, "BatchNormalizationQueueDescriptor", "beta");
529     ValidatePointer(m_Gamma, "BatchNormalizationQueueDescriptor", "gamma");
530
531
532     ValidateTensorNumDimensions(m_Mean->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "mean");
533     ValidateTensorNumDimensions(m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "variance");
534     ValidateTensorNumDimensions(m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "beta");
535     ValidateTensorNumDimensions(m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", 1, "gamma");
536
537     ValidateTensorShapesMatch(
538         m_Mean->GetTensorInfo(), m_Variance->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "variance");
539     ValidateTensorShapesMatch(
540         m_Mean->GetTensorInfo(), m_Beta->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "beta");
541     ValidateTensorShapesMatch(
542         m_Mean->GetTensorInfo(), m_Gamma->GetTensorInfo(), "BatchNormalizationQueueDescriptor", "mean", "gamma");
543 }
544
545 void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
546 {
547     ValidateSingleInput(workloadInfo, "Convolution2dQueueDescriptor");
548     ValidateSingleOutput(workloadInfo, "Convolution2dQueueDescriptor");
549
550     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "input");
551     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", 4, "output");
552
553     ValidatePointer(m_Weight, "Convolution2dQueueDescriptor", "weight");
554     ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor", 4, "weight");
555     ValidateTensorDataType(m_Weight->GetTensorInfo(), workloadInfo.m_InputTensorInfos[0].GetDataType(),
556         "Convolution2dQueueDescriptor", "weight");
557     if (m_Parameters.m_BiasEnabled)
558     {
559         ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "Convolution2dQueueDescriptor", 1, "bias");
560         ValidateTensorDataType(m_Bias->GetTensorInfo(),
561                                GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
562                                "Convolution2dQueueDescriptor", "bias");
563         ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
564             workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "Convolution2dQueueDescriptor");
565     }
566
567     ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
568         workloadInfo.m_OutputTensorInfos[0], "Convolution2dQueueDescriptor", "input", "weights", "output");
569 }
570
// Validates a depthwise 2-D convolution workload: a single 4-D input and
// output, a mandatory 4-D weight tensor, an output channel count equal to
// input_channels * channel_multiplier, an optional 1-D bias with the data
// type and quantization implied by input and weights, and a valid output
// quantization scale for quantized data.
void DepthwiseConvolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    ValidateSingleInput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");
    ValidateSingleOutput(workloadInfo, "DepthwiseConvolution2dQueueDescriptor");

    ValidateTensorNumDimensions(
        workloadInfo.m_InputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "input");
    ValidateTensorNumDimensions(
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", 4, "output");

    ValidatePointer(m_Weight, "DepthwiseConvolution2dQueueDescriptor", "weight");
    ValidateTensorNumDimensions(m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 4, "weight");

    //inputChannels * channelMultiplier should be equal to outputChannels
    // Weight dims 0/1 are read as [channel_multiplier, input_channels]; the
    // output's dim 1 is read as its channel count (appears to assume NCHW
    // layout — TODO confirm against the weight/output tensor conventions).
    const unsigned int numWeightChannelMultiplier = m_Weight->GetTensorInfo().GetShape()[0];
    const unsigned int numWeightInputChannels = m_Weight->GetTensorInfo().GetShape()[1];
    const unsigned int numWeightOutputChannels = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
    if (numWeightChannelMultiplier * numWeightInputChannels != numWeightOutputChannels)
    {
        throw InvalidArgumentException(
            boost::str(boost::format("DepthwiseConvolution2dQueueDescriptor: output_channels (provided %1%) should be "
                                     "equal to input_channels (provided %2%) multiplied by channel_multiplier "
                                     "(provided %3%).")
                                     % numWeightOutputChannels % numWeightInputChannels % numWeightChannelMultiplier));
    }

    if (m_Parameters.m_BiasEnabled)
    {
        // When bias is enabled it must be present, one-dimensional, and carry
        // the quantization parameters and data type implied by input/weights.
        ValidatePointer(m_Bias, "DepthwiseConvolution2dQueueDescriptor", "bias");
        ValidateTensorNumDimensions(m_Bias->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor", 1, "bias");
        ValidateBiasTensorQuantization(m_Bias->GetTensorInfo(),
            workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(), "DepthwiseConvolution2dQueueDescriptor");

        ValidateTensorDataType(m_Bias->GetTensorInfo(),
                               GetBiasDataType(workloadInfo.m_InputTensorInfos[0].GetDataType()),
                               "DepthwiseConvolution2dQueueDescriptor", "bias");
    }

    ValidateTensorQuantizationMultiplier(workloadInfo.m_InputTensorInfos[0], m_Weight->GetTensorInfo(),
        workloadInfo.m_OutputTensorInfos[0], "DepthwiseConvolution2dQueueDescriptor", "input", "weights", "output");
}
612
613 void PermuteQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
614 {
615     ValidateSingleInput(workloadInfo, "PermuteQueueDescriptor");
616     ValidateSingleOutput(workloadInfo, "PermuteQueueDescriptor");
617
618     const PermutationVector& mapping = m_Parameters.m_DimMappings;
619
620     const TensorInfo& input  = workloadInfo.m_InputTensorInfos[0];
621     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
622
623     ValidateTensorNumDimensions(input, "PermuteQueueDescriptor", mapping.GetSize(), "input");
624     ValidateTensorNumDimensions(output, "PermuteQueueDescriptor", mapping.GetSize(), "output");
625
626     for (unsigned int i = 0; i < mapping.GetSize(); ++i)
627     {
628         if (input.GetShape()[i] != output.GetShape()[mapping[i]])
629         {
630             throw InvalidArgumentException("PermuteQueueDescriptor: src dimension " + to_string(i) +
631                                                " (=" + to_string(input.GetShape()[i]) + ") " +
632                                                "must match dst dimension " + to_string(mapping[i]) +
633                                                " (=" + to_string(output.GetShape()[mapping[i]]) + ")");
634         }
635     }
636 }
637
638 void Pooling2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
639 {
640     ValidateSingleInput(workloadInfo, "Pooling2dQueueDescriptor");
641     ValidateSingleOutput(workloadInfo, "Pooling2dQueueDescriptor");
642
643     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "input");
644     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "Pooling2dQueueDescriptor", 4, "output");
645 }
646
647 void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
648 {
649     ValidateSingleInput(workloadInfo, "ResizeBilinearQueueDescriptor");
650     ValidateSingleOutput(workloadInfo, "ResizeBilinearQueueDescriptor");
651
652     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "input");
653     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
654
655     // Resize bilinear only changes width and height: batch and channel count must match
656     {
657         const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
658         const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
659         if (inputBatchSize != outputBatchSize)
660         {
661             throw InvalidArgumentException(
662                 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
663                     "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
664         }
665     }
666
667     {
668         const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[1];
669         const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[1];
670         if (inputChannelCount != outputChannelCount)
671         {
672             throw InvalidArgumentException(
673                 boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
674                     "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
675         }
676     }
677 }
678
679 void FakeQuantizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
680 {
681     ValidateSingleInput(workloadInfo, "FakeQuantizationQueueDescriptor");
682     ValidateSingleOutput(workloadInfo, "FakeQuantizationQueueDescriptor");
683
684     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "input");
685     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "FakeQuantizationQueueDescriptor", 2, "output");
686     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
687         workloadInfo.m_OutputTensorInfos[0],
688         "FakeQuantizationQueueDescriptor",
689         "input",
690         "output");
691     if (m_Parameters.m_Min > m_Parameters.m_Max)
692     {
693         throw InvalidArgumentException("FakeQuantizationQueueDescriptor: min cannot be greater than max");
694     }
695
696 }
697
698 void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
699 {
700     ValidateSingleInput(workloadInfo, "L2NormalizationQueueDescriptor");
701     ValidateSingleOutput(workloadInfo, "L2NormalizationQueueDescriptor");
702
703     ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "input");
704     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "L2NormalizationQueueDescriptor", 4, "output");
705     ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
706         workloadInfo.m_OutputTensorInfos[0],
707         "L2NormalizationQueueDescriptor",
708         "input",
709         "output");
710 }
711
712 void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
713 {
714     ValidateNoInputs(workloadInfo, "ConstantQueueDescriptor");
715     ValidateSingleOutput(workloadInfo, "ConstantQueueDescriptor");
716
717     if (!m_LayerOutput)
718     {
719         throw InvalidArgumentException("ConstantQueueDescriptor: No const input specified");
720     }
721
722     ValidateTensorShapesMatch(m_LayerOutput->GetTensorInfo(),
723         workloadInfo.m_OutputTensorInfos[0],
724         "ConstantQueueDescriptor",
725         "constant",
726         "output");
727 }
728
729 void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
730 {
731     ValidateSingleInput(workloadInfo, "ReshapeQueueDescriptor");
732     ValidateSingleOutput(workloadInfo, "ReshapeQueueDescriptor");
733
734     if (workloadInfo.m_InputTensorInfos[0].GetNumElements() != workloadInfo.m_OutputTensorInfos[0].GetNumElements())
735     {
736         throw InvalidArgumentException("ReshapeQueueDescriptor: Input tensor has " +
737             to_string(workloadInfo.m_InputTensorInfos[0].GetNumElements()) + " but output tensor has " +
738             to_string(workloadInfo.m_OutputTensorInfos[0].GetNumElements()) + " elements.");
739     }
740 }
741
742 void FloorQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
743 {
744     ValidateSingleInput(workloadInfo, "FloorQueueDescriptor");
745     ValidateSingleOutput(workloadInfo, "FlootQueueDescriptor");
746
747     if (workloadInfo.m_InputTensorInfos[0] != workloadInfo.m_OutputTensorInfos[0])
748     {
749         throw InvalidArgumentException("FloorQueueDescriptor: Input and output tensor infos do not match.");
750     }
751 }
752
753 } //namespace armnn