/// (Optional) Register TensorHandleFactories
/// Either this method or CreateMemoryManager() and
/// IWorkloadFactory::CreateTensor()/IWorkloadFactory::CreateSubtensor() methods must be implemented.
- virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) {}
+ virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& /*registry*/) {}
/// Returns the version of the Backend API
static constexpr BackendVersion GetApiVersion() { return BackendVersion(1, 0); }
#include <armnn/MemorySources.hpp>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
/// \param memory base address of the memory being imported.
/// \param source source of the allocation for the memory being imported.
/// \return true on success or false on failure
- virtual bool Import(void* memory, MemorySource source) { return false; };
+ virtual bool Import(void* memory, MemorySource source)
+ {
+ boost::ignore_unused(memory, source);
+ return false;
+ };
};
}
ARMNN_NO_DEPRECATE_WARN_BEGIN
IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter(
- const std::shared_ptr<SubGraph>& subGraph) const
+ const std::shared_ptr<SubGraph>& /*subGraph*/) const
{
return ISubGraphConverterPtr{};
}
return Optimizations{};
}
-IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& subGraph,
+IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/,
bool& optimizationAttempted) const
{
optimizationAttempted = false;
+ return nullptr;
}
IBackendInternal::IWorkloadFactoryPtr IBackendInternal::CreateWorkloadFactory(
- class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
+ class TensorHandleFactoryRegistry& /*tensorHandleFactoryRegistry*/) const
{
return IWorkloadFactoryPtr{};
}
namespace armnn
{
-bool LayerSupportBase::IsAbsSupported(const TensorInfo &input,
- const TensorInfo &output,
+bool LayerSupportBase::IsAbsSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsActivationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ActivationDescriptor& descriptor,
+bool LayerSupportBase::IsActivationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const ActivationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsAdditionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
- const armnn::ArgMinMaxDescriptor& descriptor,
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &/*input*/,
+ const armnn::TensorInfo &/*output*/,
+ const armnn::ArgMinMaxDescriptor& /*descriptor*/,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& mean,
- const TensorInfo& var,
- const TensorInfo& beta,
- const TensorInfo& gamma,
- const BatchNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TensorInfo& /*mean*/,
+ const TensorInfo& /*var*/,
+ const TensorInfo& /*beta*/,
+ const TensorInfo& /*gamma*/,
+ const BatchNormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const BatchToSpaceNdDescriptor& descriptor,
+bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const BatchToSpaceNdDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsComparisonSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- const ComparisonDescriptor& descriptor,
+bool LayerSupportBase::IsComparisonSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
+ const ComparisonDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
- const TensorInfo& output,
- const OriginsDescriptor& descriptor,
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> /*inputs*/,
+ const TensorInfo& /*output*/,
+ const OriginsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
+bool LayerSupportBase::IsConstantSupported(const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Convolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsDebugSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthToSpaceDescriptor& descriptor,
+bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthToSpaceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
- const TensorInfo& scores,
- const TensorInfo& anchors,
- const TensorInfo& detectionBoxes,
- const TensorInfo& detectionClasses,
- const TensorInfo& detectionScores,
- const TensorInfo& numDetections,
- const DetectionPostProcessDescriptor& descriptor,
+bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& /*boxEncodings*/,
+ const TensorInfo& /*scores*/,
+ const TensorInfo& /*anchors*/,
+ const TensorInfo& /*detectionBoxes*/,
+ const TensorInfo& /*detectionClasses*/,
+ const TensorInfo& /*detectionScores*/,
+ const TensorInfo& /*numDetections*/,
+ const DetectionPostProcessDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
- const TensorInfo& output,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/,
+ const armnn::TensorInfo& /*input1*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& input,
- const FakeQuantizationDescriptor& descriptor,
+bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& /*input*/,
+ const FakeQuantizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFloorSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsFloorSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TensorInfo& weights,
- const TensorInfo& biases,
- const FullyConnectedDescriptor& descriptor,
+bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TensorInfo& /*weights*/,
+ const TensorInfo& /*biases*/,
+ const FullyConnectedDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
- const armnn::TensorInfo& input1,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& /*input0*/,
+ const armnn::TensorInfo& /*input1*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsGreaterSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
+bool LayerSupportBase::IsInputSupported(const TensorInfo& /*input*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const InstanceNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const InstanceNormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const L2NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const L2NormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const LogSoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const LogSoftmaxDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsLstmSupported(const TensorInfo& input,
- const TensorInfo& outputStateIn,
- const TensorInfo& cellStateIn,
- const TensorInfo& scratchBuffer,
- const TensorInfo& outputStateOut,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const LstmDescriptor& descriptor,
- const LstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsLstmSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*outputStateIn*/,
+ const TensorInfo& /*cellStateIn*/,
+ const TensorInfo& /*scratchBuffer*/,
+ const TensorInfo& /*outputStateOut*/,
+ const TensorInfo& /*cellStateOut*/,
+ const TensorInfo& /*output*/,
+ const LstmDescriptor& /*descriptor*/,
+ const LstmInputParamsInfo& /*paramsInfo*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMaximumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMaximumSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMeanSupported(const TensorInfo& input,
- const TensorInfo& output,
- const MeanDescriptor& descriptor,
+bool LayerSupportBase::IsMeanSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const MeanDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
+ armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
- boost::ignore_unused(input);
- boost::ignore_unused(output);
return true;
}
-bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
- armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
+ armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
- boost::ignore_unused(input);
- boost::ignore_unused(output);
return true;
}
-bool LayerSupportBase::IsMergeSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMergeSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMinimumSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& input,
- const TensorInfo& output,
- const NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const NormalizationDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsOutputSupported(const TensorInfo& output,
+bool LayerSupportBase::IsOutputSupported(const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPadSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PadDescriptor& descriptor,
+bool LayerSupportBase::IsPadSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const PadDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPermuteSupported(const TensorInfo& input,
- const TensorInfo& output,
- const PermuteDescriptor& descriptor,
+bool LayerSupportBase::IsPermuteSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const PermuteDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Pooling2dDescriptor& descriptor,
+bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Pooling2dDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
- const PreCompiledDescriptor& descriptor,
+bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& /*input*/,
+ const PreCompiledDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
- const TensorInfo& alpha,
- const TensorInfo& output,
+bool LayerSupportBase::IsPreluSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*alpha*/,
+ const TensorInfo& /*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
- const armnn::TensorInfo& output,
+bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& /*input*/,
+ const armnn::TensorInfo& /*output*/,
armnn::Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& input,
- const TensorInfo& previousCellStateIn,
- const TensorInfo& previousOutputIn,
- const TensorInfo& cellStateOut,
- const TensorInfo& output,
- const QuantizedLstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*previousCellStateIn*/,
+ const TensorInfo& /*previousOutputIn*/,
+ const TensorInfo& /*cellStateOut*/,
+ const TensorInfo& /*output*/,
+ const QuantizedLstmInputParamsInfo& /*paramsInfo*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
- const ReshapeDescriptor& descriptor,
+bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
+ const ReshapeDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
- const TensorInfo& output,
+bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
- const TensorInfo& output,
- const ResizeDescriptor& descriptor,
+bool LayerSupportBase::IsResizeSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const ResizeDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
- const TensorInfo &output,
+bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &/*input*/,
+ const TensorInfo &/*output*/,
Optional<std::string &> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSliceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SliceDescriptor& descriptor,
+bool LayerSupportBase::IsSliceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SliceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SoftmaxDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-
-bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToBatchNdDescriptor& descriptor,
+
+bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SpaceToBatchNdDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
- const TensorInfo& output,
- const SpaceToDepthDescriptor& descriptor,
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const SpaceToDepthDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
- const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+ const ViewsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
- const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
- const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+ const std::vector<std::reference_wrapper<TensorInfo>>& /*outputs*/,
+ const ViewsDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
- const TensorInfo& output,
- const StackDescriptor& descriptor,
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+ const TensorInfo& /*output*/,
+ const StackDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
- const std::vector<const TensorInfo*>& outputs,
- const StandInDescriptor& descriptor,
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+ const std::vector<const TensorInfo*>& /*outputs*/,
+ const StandInDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
if (reasonIfUnsupported)
{
    reasonIfUnsupported.value() = "StandIn layer is not executable via LayerSupportBase";
}
return false;
}
-bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
- const TensorInfo& output,
- const StridedSliceDescriptor& descriptor,
+bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const StridedSliceDescriptor& /*descriptor*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
+bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsSwitchSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output0,
- const TensorInfo& output1,
+bool LayerSupportBase::IsSwitchSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output0*/,
+ const TensorInfo& /*output1*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
-bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const TransposeConvolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const TransposeConvolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
};
template<typename T>
-bool AllTypesAreEqualImpl(T t)
+bool AllTypesAreEqualImpl(T)
{
return true;
}
const WorkloadInfo& info,
Args&&... args)
{
+ boost::ignore_unused(descriptor, info, args...);
return nullptr;
}
};
virtual profiling::ProfilingGuid GetGuid() const = 0;
- virtual void RegisterDebugCallback(const DebugCallbackFunction& func) {}
+ virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
};
// NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template
"output_1");
}
-void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
{
// This is internally generated so it should not need validation.
}
}
// Default Implementations
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
- const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
- const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
- const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
- const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
+ const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
- const InstanceNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
- const WorkloadInfo &info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*Info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}

-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
- const TransposeConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
bool SupportsSubTensors() const override
{ return false; };
- std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, TensorShape const& subTensorShape,
- unsigned int const *subTensorOrigin) const override
+ std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& /*parent*/,
+ TensorShape const& /*subTensorShape*/,
+ unsigned int const* /*subTensorOrigin*/) const override
{ return nullptr; };
- std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
- const bool IsMemoryManaged = true) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+ const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
- std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, DataLayout dataLayout,
- const bool IsMemoryManaged = true) const override
+ std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+ DataLayout /*dataLayout*/,
+ const bool /*IsMemoryManaged*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const override
+ std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
- std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override
+ std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const override
{ return nullptr; }
};
static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
unsigned int nIn, unsigned int nOut) \
{ \
+ boost::ignore_unused(factory, nIn, nOut); \
return std::unique_ptr<armnn::IWorkload>(); \
} \
};
}
IBackendInternal::IWorkloadFactoryPtr MockBackend::CreateWorkloadFactory(
- const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
+ const IBackendInternal::IMemoryManagerSharedPtr& /*memoryManager*/) const
{
return IWorkloadFactoryPtr{};
}
class MockLayerSupport : public LayerSupportBase {
public:
- bool IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsInputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsOutputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsOutputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsAdditionSupported(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsAdditionSupported(const TensorInfo& /*input0*/,
+ const TensorInfo& /*input1*/,
+ const TensorInfo& /*output*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
- bool IsConvolution2dSupported(const TensorInfo& input,
- const TensorInfo& output,
- const Convolution2dDescriptor& descriptor,
- const TensorInfo& weights,
- const Optional<TensorInfo>& biases,
- Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+ bool IsConvolution2dSupported(const TensorInfo& /*input*/,
+ const TensorInfo& /*output*/,
+ const Convolution2dDescriptor& /*descriptor*/,
+ const TensorInfo& /*weights*/,
+ const Optional<TensorInfo>& /*biases*/,
+ Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
{
return true;
}
#include <armnn/backends/IBackendInternal.hpp>
+#include <boost/core/ignore_unused.hpp>
+
constexpr const char* TestDynamicBackendId()
{
#if defined(VALID_TEST_DYNAMIC_BACKEND_1)
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
{
+ boost::ignore_unused(memoryManager);
return IWorkloadFactoryPtr{};
}
ILayerSupportSharedPtr GetLayerSupport() const override
return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
-bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
+template<typename ... Args>
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
+ boost::ignore_unused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
return true;
#else
return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, input);
}
bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsClBackendSupported(reasonIfUnsupported);
+ return IsClBackendSupported(reasonIfUnsupported, output);
}
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
*splitAxis.begin());
}
#endif
+ boost::ignore_unused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
+#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/format.hpp>
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
+ boost::ignore_unused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
+ boost::ignore_unused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
#include <string>
#include <sstream>
+#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
const cl_event * event_wait_list,
cl_event * event)
{
+ boost::ignore_unused(event);
cl_int retVal = 0;
// Get the name of the kernel
namespace
{
-bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
+template<typename ... Args>
+bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
+ boost::ignore_unused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
return true;
#else
return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
- return IsNeonBackendSupported(reasonIfUnsupported);
+ return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
#if defined(ARMCOMPUTENEON_ENABLED)
bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsNeonBackendSupported(reasonIfUnsupported);
+ return IsNeonBackendSupported(reasonIfUnsupported, input);
}
bool NeonLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsNeonBackendSupported(reasonIfUnsupported);
+ return IsNeonBackendSupported(reasonIfUnsupported, output);
}
bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
*splitAxis.begin());
}
#endif
+ boost::ignore_unused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+
bool supported = true;
std::array<DataType,3> supportedInputTypes =
reasonIfUnsupported);
}
-bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
- Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
+ Optional<std::string&> /*reasonIfUnsupported*/) const
{
return true;
}
return supported;
}
-bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
- Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
+ Optional<std::string&> /*reasonIfUnsupported*/) const
{
return true;
}
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType, 3> supportedTypes =
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
+ boost::ignore_unused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
- const bool IsMemoryManaged) const
+ const bool isMemoryManaged) const
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
+ boost::ignore_unused(isMemoryManaged);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout,
- const bool IsMemoryManaged) const
+ const bool isMemoryManaged) const
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
+ boost::ignore_unused(isMemoryManaged, dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
+ boost::ignore_unused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
+ boost::ignore_unused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+ const WorkloadInfo& /*info*/) const
{
return nullptr;
}
static armnn::RefWorkloadFactory GetFactory(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
{
+ boost::ignore_unused(memoryManager);
return armnn::RefWorkloadFactory();
}
};
void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
{
+ boost::ignore_unused(outputTensorInfo);
+
unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
const unsigned int outerElements = armnnUtils::GetNumElementsBetween(inputTensorInfo.GetShape(), 0, uAxis);
#include "Dequantize.hpp"
+#include <boost/core/ignore_unused.hpp>
namespace armnn
{
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
{
+ boost::ignore_unused(outputInfo);
BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
float* detectionScores,
float* numDetections)
{
+ boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+
// Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
// which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
std::vector<float> boxCorners(boxEncodingsInfo.GetNumElements());
#include <backendsCommon/WorkloadData.hpp>
+#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
const int32_t* indices,
Encoder<float>& output)
{
+ boost::ignore_unused(outputInfo);
const TensorShape& paramsShape = paramsInfo.GetShape();
unsigned int paramsProduct = 1;
{
case PoolingAlgorithm::Max:
{
- return [](float & accumulated, float kernelSize) {};
+ return [](float & /*accumulated*/, float /*kernelSize*/) {};
}
case PoolingAlgorithm::Average: