#pragma once
#include <armnn/MemorySources.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
/// \return true on success or false on failure
virtual bool Import(void* memory, MemorySource source)
{
- boost::ignore_unused(memory, source);
+ IgnoreUnused(memory, source);
return false;
};
};
#pragma once
+#include "ITensorHandle.hpp"
+
#include <armnn/IRuntime.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/Types.hpp>
-#include "ITensorHandle.hpp"
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
return CreateTensorHandle(tensorInfo);
}
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
return CreateTensorHandle(tensorInfo, dataLayout);
}
#include "DynamicQuantizationVisitor.hpp"
#include "NetworkUtils.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
AddToCalibratedLayers(layer);
}
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name, activationDescriptor);
+    IgnoreUnused(name);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
AddToCalibratedLayers(layer);
}
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
const ConcatDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(name);
- boost::ignore_unused(originsDescriptor);
+ IgnoreUnused(name);
+ IgnoreUnused(originsDescriptor);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
SetRange(layer, 0, -0.0f, 0.0f);
AddToCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
m_OutputLayers.push_back(id);
}
IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
{
- boost::ignore_unused(previousFrame);
+ IgnoreUnused(previousFrame);
for (auto& workload: m_WorkloadQueue)
{
workload->Execute();
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
- boost::ignore_unused(layer);
+ IgnoreUnused(layer);
BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
graph.m_Layers.erase(layerIt);
const size_t numErased = graph.m_PosInGraphMap.erase(this);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
, m_BackendHint(EmptyOptional())
, m_Guid(profiling::ProfilingService::Instance().NextGuid())
{
- boost::ignore_unused(layout);
+ IgnoreUnused(layout);
m_InputSlots.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
{
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <algorithm>
#include <memory>
#include <list>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/cast.hpp>
namespace armnn
//
#pragma once
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
template<typename ... Params>
bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return true;
}
template<typename ... Params>
bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return false;
}
template<typename ... Params>
bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
return false;
}
template<typename ... Params>
bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
return false;
}
template<typename ... Params>
bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
return false;
}
template<typename ... Params>
bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
return false;
}
template<typename ... Params>
bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
return false;
}
template<typename ... Params>
bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
return false;
}
template<typename ... Params>
bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
return false;
}
template<typename ... Params>
bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
return false;
}
std::string reasonIfUnsupported;
BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
"Factory does not support layer");
- boost::ignore_unused(reasonIfUnsupported);
+ IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
}
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <armnn/Logging.hpp>
-
+#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Utils.hpp>
#if defined(_MSC_VER)
#endif
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <iostream>
namespace armnn
public:
void Consume(const std::string& s) override
{
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#if defined(_MSC_VER)
OutputDebugString(s.c_str());
OutputDebugString("\n");
#elif defined(__ANDROID__)
__android_log_write(ANDROID_LOG_DEBUG, "armnn", s.c_str());
#else
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#endif
}
};
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <ProfilingService.hpp>
OutputSlot& slot,
TensorHandleFactoryRegistry& registry)
{
- boost::ignore_unused(backends, slot, registry);
+ IgnoreUnused(backends, slot, registry);
return ITensorHandleFactory::DeferredFactoryId;
}
#include "NetworkQuantizerUtils.hpp"
#include "Layer.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (m_LayerId == id)
{
m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
#include "Profiling.hpp"
#include <armnn/BackendId.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include "JsonPrinter.hpp"
#include <stack>
#include <boost/algorithm/string.hpp>
-#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- boost::ignore_unused(parent);
+ IgnoreUnused(parent);
BOOST_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
#include "ProfilingEvent.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include "armnn/IProfiler.hpp"
#include "WallClockTimer.hpp"
#include <stack>
#include <map>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
void ConstructNextInVector(std::vector<InstrumentPtr>& instruments)
{
- boost::ignore_unused(instruments);
+ IgnoreUnused(instruments);
}
template<typename Arg, typename... Args>
#include "StaticRangeVisitor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
}
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
}
const OriginsDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(originsDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(originsDescriptor);
+ IgnoreUnused(name);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const ResizeDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(resizeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
#include "SubgraphView.hpp"
#include "Graph.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <utility>
namespace armnn
std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
{
// Ignore unused for release builds
- boost::ignore_unused(errorMessage);
+ IgnoreUnused(errorMessage);
// Check if the item is valid
BOOST_ASSERT_MSG(i, errorMessage.c_str());
#include "SubgraphViewSelector.hpp"
#include "Graph.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
#include <algorithm>
#include <map>
{
size_t numErased = a->m_Dependants.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemCopyQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemImportQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
const IWorkloadFactory& factory,
const bool IsMemoryManaged = true) override
{
- boost::ignore_unused(registry, factory, IsMemoryManaged);
+ IgnoreUnused(registry, factory, IsMemoryManaged);
}
/// Creates a dynamically-allocated copy of this layer.
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <numeric>
-#include <boost/core/ignore_unused.hpp>
-
using namespace armnnUtils;
namespace armnn
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
}
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
// This throws in the event that it's called. We would expect that any backend that
// "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
// during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
throw Exception("Stand in layer does not support infering output shapes");
}
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <Half.hpp>
void Run(Graph& graph, Layer& layer) const override
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
if (Predicate::Test(layer))
{
layer.OperateOnConstantTensors(Converter::Func);
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
/// Fp16ToFp32 followed by Fp32ToFp16 or vice-versa.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
/// Bypasses both layers for that connection if one is the inverse of the other.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
/// the child layer, so the siblings are left unconnected (and later removed).
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
auto& child = connection.GetOwningLayer();
if (!child.IsOutputUnconnected())
#include <ResolveType.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <utility>
armnn::Graph& graph,
bool biasEnabled = false)
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
// To create a PreCompiled layer, create a network and Optimize it.
armnn::Network net;
std::vector<unsigned int> slotIndexes;
auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
{
- boost::ignore_unused(guid);
+ IgnoreUnused(guid);
slotIndexes.push_back(slotIndex);
tensorShapes.push_back(tensor->GetShape());
callCount++;
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
}
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
}
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(activationDescriptor, name);
+ IgnoreUnused(activationDescriptor, name);
auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
}
#include <armnn/Optional.hpp>
#include <string>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
void PassStringRef(armnn::Optional<std::string&> value)
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::EmptyOptional())
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
} // namespace <anonymous>
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/tools/output_test_stream.hpp>
profiler->Print(json);
std::string output = buffer.str();
- boost::ignore_unused(output);
+ armnn::IgnoreUnused(output);
// Disable profiling here to not print out anything on stdout.
profiler->EnableProfiling(false);
// SPDX-License-Identifier: MIT
//
-#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <QuantizeHelper.hpp>
-
#include "../Graph.hpp"
#include "../Network.hpp"
#include "../NetworkQuantizerUtils.hpp"
#include "../RangeTracker.hpp"
#include "../../armnnQuantizer/CommandLineProcessor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+#include <QuantizeHelper.hpp>
+
#include <boost/test/unit_test.hpp>
#include <unordered_map>
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(m_InputShape == info.GetShape());
// Based off current default [-15.0f, 15.0f]
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
const OffsetScalePair& params,
DataType dataType = DataType::QAsymmU8)
{
- boost::ignore_unused(dataType);
+ IgnoreUnused(dataType);
TestQuantizationParamsImpl(info, dataType, params.first, params.second);
}
void VisitAdditionLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-20.0f, 20.0f]
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
std::string(armnn::GetDataTypeName(info.GetDataType()))
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 3.5f]
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-1.0f, 1.0f]
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-5.0f, 15.0f]
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
const ConstTensor& gamma,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
const DepthToSpaceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
const InstanceNormalizationDescriptor& descriptor,
const char* name = nullptr)
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 1.0f]
const PermuteDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(spaceToBatchNdDescriptor, name);
+ IgnoreUnused(spaceToBatchNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
const Pooling2dDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
const ConstTensor& input,
const char* name = nullptr) override
{
- boost::ignore_unused(input, name);
+ IgnoreUnused(input, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitArgMinMaxLayer(const IConnectableLayer* layer,
const ArgMinMaxDescriptor& argMinMaxDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(argMinMaxDescriptor, name);
+ IgnoreUnused(argMinMaxDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
const ComparisonDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(originsDescriptor, name);
+ IgnoreUnused(originsDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(
outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(reshapeDescriptor, name);
+ IgnoreUnused(reshapeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
const SplitterDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(resizeDescriptor, name);
+ IgnoreUnused(resizeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
const StridedSliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(batchToSpaceNdDescriptor, name);
+ IgnoreUnused(batchToSpaceNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
switch (id)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
void VisitPreluLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(info,
{ 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitStackLayer(const IConnectableLayer* layer,
const StackDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
const SliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_InputShape == info.GetShape());
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_OutputShape == info.GetShape());
void VisitQuantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedQuantizeLayer = true;
}
void VisitDequantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedDequantizeLayer = true;
}
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
+ IgnoreUnused(dubious);
+ IgnoreUnused(suppressed);
}
#endif // WITH_VALGRIND
#include <Network.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <vector>
#include <string>
-#include <boost/core/ignore_unused.hpp>
using namespace armnn;
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/format.hpp>
BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
for (auto inputBinding : m_InputBindings)
{
if (inputBinding.first == name)
BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
for (auto outputBinding : m_OutputBindings)
{
if (outputBinding.first == name)
armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
unsigned int layerIndex)
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
armnn::Pooling2dDescriptor desc;
switch (pooling2dDesc->poolType())
Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
unsigned int layerIndex)
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
armnn::NormalizationDescriptor desc;
switch (normalizationDescriptor->normChannelType())
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <string>
#include <iostream>
const std::string & dataType,
const std::string & activation="NONE")
{
- boost::ignore_unused(activation);
+ armnn::IgnoreUnused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
#include <string>
#include <iostream>
const std::string & dataType,
const std::string & activation="NONE")
{
- boost::ignore_unused(activation);
+ armnn::IgnoreUnused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
#pragma once
#include "SchemaSerialize.hpp"
+#include "test/TensorHelpers.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+#include <ArmnnSchema_generated.h>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <ResolveType.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
-#include <ResolveType.hpp>
-#include "test/TensorHelpers.hpp"
-
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
-
-#include <ArmnnSchema_generated.h>
using armnnDeserializer::IDeserializer;
using TensorRawPtr = armnnSerializer::TensorInfo*;
armnnSerializer::TensorInfo tensorType, const std::string& name,
const float scale, const int64_t zeroPoint)
{
- boost::ignore_unused(name);
+ armnn::IgnoreUnused(name);
BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
tensors->dimensions()->begin(), tensors->dimensions()->end());
#define BOOST_FILESYSTEM_NO_DEPRECATED
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
armnn::LayerBindingId id,
const char* name)
{
- boost::ignore_unused(name);
+ armnn::IgnoreUnused(name);
m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
}
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <iostream>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <flatbuffers/util.h>
// Build FlatBuffer for Input Layer
void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
// Build FlatBuffer for Output Layer
void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
const armnn::ActivationDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
// Build FlatBuffer for Addition Layer
void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
const armnn::ArgMinMaxDescriptor& descriptor,
const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
const armnn::BatchToSpaceNdDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
const armnn::ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
const armnn::ComparisonDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
auto fbDescriptor = serializer::CreateComparisonDescriptor(
const armnn::ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
const armnn::DepthToSpaceDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
const armnn::ConstTensor& anchors,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
const armnn::ElementwiseUnaryDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);
void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
m_flatBufferBuilder,
const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
const armnn::LstmInputParams& params,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
const armnn::MeanDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
const armnn::ConcatDescriptor& concatDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
const armnn::PadDescriptor& padDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
const armnn::PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
const armnn::ResizeBilinearDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
const armnn::SliceDescriptor& sliceDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
const armnn::SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
auto flatBufferDescriptor =
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer ViewOrigins
std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
const armnn::NormalizationDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
const armnn::StackDescriptor& stackDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
const armnn::StandInDescriptor& standInDescriptor,
const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
standInDescriptor.m_NumInputs,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
const armnn::TransposeDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
const armnn::QuantizedLstmInputParams& params,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
// SPDX-License-Identifier: MIT
//
+#include "../Serializer.hpp"
+
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include "../Serializer.hpp"
+#include <boost/test/unit_test.hpp>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
-#include <boost/test/unit_test.hpp>
-
BOOST_AUTO_TEST_SUITE(SerializerTests)
class VerifyActivationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
const armnn::ActivationDescriptor& activationDescriptor,
const char* name) override
{
- boost::ignore_unused(layer, activationDescriptor);
+ IgnoreUnused(layer, activationDescriptor);
BOOST_TEST(name == "activation");
}
};
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
-#include <boost/filesystem.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
// armnnUtils:
#include <armnnUtils/Permute.hpp>
#include <flatbuffers/flexbuffers.h>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
+#include <boost/filesystem.hpp>
#include <fstream>
#include <algorithm>
armnn::TensorInfo& tensorInfo,
armnn::Optional<armnn::PermutationVector&> permutationVector)
{
- boost::ignore_unused(tensorPtr);
+ IgnoreUnused(tensorPtr);
BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
BOOST_ASSERT_MSG(bufferPtr != nullptr,
boost::str(
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
- boost::ignore_unused(operatorPtr);
+ IgnoreUnused(operatorPtr);
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(inputs.size(), 1);
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>
#include <tensorflow/core/framework/graph.pb.h>
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
-#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>
ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
if (numberOfInputs < 2)
{
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
// If one of the inputs is a MatMul and the other is a const, then we handle both nodes
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return AddAdditionLayer(nodeDef, true);
}
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
// Any requests for the output slots of this node should be forwarded to the node connected as input.
return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
BOOST_ASSERT(nodeDef.op() == "Const");
if (nodeDef.attr().count("value") == 0)
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (inputs.size() != 2)
{
ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
const auto inputCount = inputs.size();
ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// input consists of:
// input[0] the tensor which will be padded
// input[1] the tensor holding the padding values
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
// In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// Note: the Shape layer is handled in a special way, because:
// 1. ARMNN doesn't support int32 tensors which it outputs.
// 2. ARMNN works with statically shaped tensors which are known at parse time.
ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
NormalizationDescriptor normalizationDescriptor;
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// Defers the creation of the layer (see ParsedMatMulTfOperation).
return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
}
ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return AddRealDivLayer(nodeDef);
}
ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::ReLu;
ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::BoundedReLu;
ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::Sigmoid;
ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
const tensorflow::GraphDef &graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::SoftReLu;
ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::TanH;
ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "armnnTfParser/ITfParser.hpp"
#include "ParserPrototxtFixture.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
+
BOOST_AUTO_TEST_SUITE(TensorflowParser)
struct SplitFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
SplitLastDimFixture(bool withDimZero=false) {
- boost::ignore_unused(withDimZero);
+ armnn::IgnoreUnused(withDimZero);
m_Prototext = R"(
node {
name: "Placeholder"
#pragma once
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <Half.hpp>
#include <iterator>
#include <vector>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnnUtils
{
static T Quantize(float value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
static float Dequantize(T value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
};
{
static armnn::Half Quantize(float value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return armnn::Half(value);
}
static float Dequantize(armnn::Half value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
};
//
#include <QuantizeHelper.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <vector>
template<typename T>
bool IsFloatIterFunc(T iter)
{
- boost::ignore_unused(iter);
+ armnn::IgnoreUnused(iter);
return armnnUtils::IsFloatingPointIterator<T>::value;
}
// SPDX-License-Identifier: MIT
//
#include <armnn/Exceptions.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/LayerSupportBase.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace
{
const WorkloadInfo& info,
Args&&... args)
{
- boost::ignore_unused(descriptor);
- boost::ignore_unused(info);
- boost::ignore_unused(args...);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(info);
+ IgnoreUnused(args...);
return nullptr;
}
};
TensorShape srcStrides = srcTensor->GetStrides();
const TensorShape& srcShape = srcTensor->GetShape();
const auto srcSize = srcTensor->GetStrides()[0] * srcShape[0];
- boost::ignore_unused(srcSize); // Only used for asserts
+ IgnoreUnused(srcSize); // Only used for asserts
TensorShape dstStrides = dstTensor->GetStrides();
const TensorShape& dstShape = dstTensor->GetShape();
const auto dstSize = dstTensor->GetStrides()[0] * dstShape[0];
- boost::ignore_unused(dstSize); // Only used for asserts
+ IgnoreUnused(dstSize); // Only used for asserts
size_t srcDepth = 1;
size_t srcBatches = 1;
#include "ProfilingUtils.hpp"
#include "RequestCounterDirectoryCommandHandler.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/profiling/ISendTimelinePacket.hpp>
/// Create and write a CounterDirectoryPacket from the parameters to the buffer.
virtual void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory)
{
- boost::ignore_unused(counterDirectory);
+ armnn::IgnoreUnused(counterDirectory);
}
/// Create and write a PeriodicCounterCapturePacket from the parameters to the buffer.
virtual void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
const std::vector<uint16_t>& selectedCounterIds)
{
- boost::ignore_unused(capturePeriod);
- boost::ignore_unused(selectedCounterIds);
+ armnn::IgnoreUnused(capturePeriod);
+ armnn::IgnoreUnused(selectedCounterIds);
}
std::vector<Timestamp> GetTimestamps()
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
unsigned int nIn, unsigned int nOut) \
{ \
- boost::ignore_unused(factory, nIn, nOut); \
+ IgnoreUnused(factory, nIn, nOut); \
return std::unique_ptr<armnn::IWorkload>(); \
} \
};
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
- boost::ignore_unused(layer);
+ IgnoreUnused(layer);
return 2;
}
}
catch(const armnn::InvalidArgumentException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
// This is ok since we throw InvalidArgumentException when creating the dummy workload.
return true;
}
// InvalidArgumentException or UnimplementedException.
catch(const armnn::InvalidArgumentException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
return true;
}
catch(const armnn::UnimplementedException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
return true;
}
catch(const std::exception& e)
IBackendInternal::IBackendProfilingContextPtr MockBackend::CreateBackendProfilingContext(
const IRuntime::CreationOptions& options, IBackendProfilingPtr& backendProfiling)
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
std::shared_ptr<armnn::MockBackendProfilingContext> context =
std::make_shared<MockBackendProfilingContext>(backendProfiling);
MockBackendProfilingService::Instance().SetProfilingContextPtr(context);
#include <armnn/backends/IBackendInternal.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
constexpr const char* TestDynamicBackendId()
{
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
ILayerSupportSharedPtr GetLayerSupport() const override
unsigned int inputChannels,
unsigned int inputBatchSize)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = inputWidth;
unsigned int outputHeight = inputHeight;
unsigned int outputChannels = inputChannels;
float upperBound,
const armnn::ActivationDescriptor& activationDescriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int inputHeight = 20;
unsigned int inputWidth = 17;
unsigned int inputChannels = 3;
int32_t outOffset,
const std::vector<float>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr static unsigned int inputWidth = 16u;
constexpr static unsigned int inputHeight = 1u;
constexpr static unsigned int inputChannels = 1u;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const int inputDataSize = 120;
std::vector<float> inputData(inputDataSize);
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int width = 17;
unsigned int height = 29;
unsigned int channels = 2;
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
// Create Initial Tensor
// 1, 2, 3
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 4;
unsigned int channels = 1;
unsigned int height = 2;
const std::vector<int32_t>& outputData,
int axis = 3)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
LayerTestResult<int32_t, 3> result(outputTensorInfo);
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
int32_t qOffset,
armnn::DataLayout dataLayout)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2;
const unsigned int height = 3;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2;
const unsigned int height = 3;
const unsigned int channels = 5;
float scale = 1.0f,
int32_t offset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
float outQuantScale,
int outQuantOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
const T * inputData,
std::vector<T>& outputData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
unsigned int & concatDim,
TensorInfo & outputTensorInfo)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
// Defines the tensor descriptors.
TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr unsigned int inputWidth = 3;
constexpr unsigned int inputHeight = 4;
constexpr unsigned int inputChannels = 3;
#include <QuantizeHelper.hpp>
#include <armnnUtils/TensorUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
- boost::ignore_unused(memoryManager);
+ armnn::IgnoreUnused(memoryManager);
unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
uint32_t strideX = 1,
uint32_t strideY = 1)
{
- boost::ignore_unused(qScale, qOffset);
+ armnn::IgnoreUnused(qScale, qOffset);
unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
bool biasEnabled,
armnn::DataLayout dataLayout)
{
- boost::ignore_unused(biasEnabled);
+ armnn::IgnoreUnused(biasEnabled);
// Use common single-batch 5x5 image.
armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
bool biasEnabled,
const armnn::DataLayout& dataLayout)
{
- boost::ignore_unused(biasEnabled);
+ armnn::IgnoreUnused(biasEnabled);
// Input is a single-batch, 1 channel, 5x5 image.
armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
{
PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
const std::vector<T1>& expectedOutputData,
armnn::DequantizeQueueDescriptor descriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
LayerTestResult<T1, Dim> ret(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr unsigned int width = 2;
constexpr unsigned int height = 3;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
inputTensorInfo.SetQuantizationScale(0.1f);
bool biasEnabled,
bool transposeWeights)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
const std::vector<int32_t>& indicesData,
const std::vector<T>& outputData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto params = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
const armnn::DataLayout layout,
float epsilon = 1e-12f)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
float qScale = 1.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
LayerTestResult<T, NumDims> result(outputInfo);
result.outputExpected =
MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 16;
unsigned int inputSize = 5;
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
bool cifgEnabled = true;
bool peepholeEnabled = true;
bool projectionEnabled = false;
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 3;
unsigned int inputSize = 5;
const boost::multi_array<uint8_t, 2>& input,
const boost::multi_array<uint8_t, 2>& outputExpected)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
float scale = 1.0f,
int32_t offset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int shape0[] = { 1, 2, 2, 2 };
unsigned int shape1[] = { 1, 1, 1, 1 };
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 16;
const unsigned int height = 32;
const unsigned int channelCount = 2;
armnn::NormalizationAlgorithmChannel normChannel,
armnn::NormalizationAlgorithmMethod normMethod)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;
const unsigned int inputChannels = 1;
int32_t qOffset,
const float customPaddingValue)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
LayerTestResult<T, 4> ret(outputTensorInfo);
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <backendsCommon/WorkloadInfo.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& outputExpected)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
auto heightIndex = dimensionIndices.GetHeightIndex();
float qScale = 1.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int inputWidth = 16;
const unsigned int inputHeight = 32;
const unsigned int channelCount = 2;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
const std::vector<T>& expectedOutputData,
armnn::QuantizeQueueDescriptor descriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
LayerTestResult<T, Dim> ret(outputTensorInfo);
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
LayerTestResult<T, NumDims> ret(outputTensorInfo);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const ResizeTestParams& params)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
const float qScale = 1.0f,
const int qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputInfo.SetQuantizationScale(qScale);
const std::vector<float>& inputData,
int axis = 1)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using std::exp;
const float qScale = 1.f / 256.f;
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
{
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int inputWidth = 5;
unsigned int inputHeight = 6;
unsigned int inputChannels = 3;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale, int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
auto input = MakeTensor<T, 3>(
tensorInfo,
const std::vector<std::vector<T>>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int numInputs = static_cast<unsigned int>(inputData.size());
std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
for (unsigned int i = 0; i < numInputs; ++i)
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
const TensorData<T>& weights,
const armnn::Optional<TensorData<BT>>& biases)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace armnn;
VerifyInputTensorData(input, "input");
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
LayerTestResult<T, 4> ret(outputTensorInfo);
#include <LeakChecking.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace cl
{
, m_ProfilingEnabled(profilingEnabled)
{
// Ignore m_ProfilingEnabled if unused to avoid compiling problems when ArmCompute is disabled.
- boost::ignore_unused(m_ProfilingEnabled);
+ IgnoreUnused(m_ProfilingEnabled);
try
{
#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/BackendRegistry.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
-#include <boost/core/ignore_unused.hpp>
-
#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/ClTransposeWorkload.hpp"
#endif
-using namespace boost;
namespace armnn
{
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
- boost::ignore_unused(reasonIfUnsupported, (args)...);
+ IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
return true;
#else
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
*splitAxis.begin());
}
#endif
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/format.hpp>
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
#include "OpenClTimer.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <string>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
const cl_event * event_wait_list,
cl_event * event)
{
- boost::ignore_unused(event);
+ IgnoreUnused(event);
cl_int retVal = 0;
// Get the name of the kernel
#include <backendsCommon/test/RuntimeTestImpl.hpp>
#include <test/ProfilingTestUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#ifdef WITH_VALGRIND
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
+ IgnoreUnused(dubious);
+ IgnoreUnused(suppressed);
}
#endif
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/NeonTransposeWorkload.hpp"
#endif
-using namespace boost;
-
namespace armnn
{
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
- boost::ignore_unused(reasonIfUnsupported, (args)...);
+ IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
return true;
#else
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
reasonIfUnsupported,
input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input);
- ignore_unused(output);
- ignore_unused(reasonIfUnsupported);
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
return true;
}
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input);
- ignore_unused(output);
- ignore_unused(reasonIfUnsupported);
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
return true;
}
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ armnn::IgnoreUnused(output);
return IsNeonBackendSupported(reasonIfUnsupported) &&
IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ armnn::IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
reasonIfUnsupported,
input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ armnn::IgnoreUnused(descriptor);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
*splitAxis.begin());
}
#endif
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
#include <Layer.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
namespace armnn
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
const WorkloadInfo &info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
#include <backendsCommon/WorkloadFactoryBase.hpp>
#include <aclCommon/BaseMemoryManager.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0,
uint32_t padTop = 0, uint32_t padBottom = 0)
{
- boost::ignore_unused(depthMultiplier);
+ IgnoreUnused(depthMultiplier);
DepthwiseConvolution2dDescriptor desc;
#include <armnn/TypesUtils.hpp>
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <LayerSupportCommon.hpp>
-
#include <backendsCommon/LayerSupportRules.hpp>
#include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <vector>
#include <array>
const armnn::ArgMinMaxDescriptor &descriptor,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
const ComparisonDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedInputTypes =
{
const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,5> supportedTypes =
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference Convolution2d: biases is not a supported type.");
}
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return supported;
}
const DepthToSpaceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: biases is not a supported type.");
}
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return supported;
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+ IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
bool supported = true;
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,1> supportedTypes =
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ IgnoreUnused(output);
bool supported = true;
std::array<DataType,3> supportedTypes =
const InstanceNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
{
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
{
const LogSoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 2> supportedTypes =
{
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
- ignore_unused(paramsInfo);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(paramsInfo);
bool supported = true;
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
- ignore_unused(descriptor);
+ IgnoreUnused(output);
+ IgnoreUnused(descriptor);
// Define supported output types.
std::array<DataType,7> supportedOutputTypes =
{
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,5> supportedTypes =
{
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType, 3> supportedTypes =
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,6> supportedTypes =
{
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,3> supportedTypes =
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
#include "RefTensorHandleFactory.hpp"
#include "RefTensorHandle.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> RefTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const
{
- boost::ignore_unused(dataLayout);
+ IgnoreUnused(dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager, m_ImportFlags);
}
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
- boost::ignore_unused(isMemoryManaged);
+ IgnoreUnused(isMemoryManaged);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
- boost::ignore_unused(isMemoryManaged, dataLayout);
+ IgnoreUnused(isMemoryManaged, dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
//
#pragma once
-#include <armnn/Optional.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
#include "RefMemoryManager.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/Optional.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
static armnn::RefWorkloadFactory GetFactory(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return armnn::RefWorkloadFactory();
}
};
void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
{
- boost::ignore_unused(outputTensorInfo);
+ IgnoreUnused(outputTensorInfo);
unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
#pragma once
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
- boost::ignore_unused(axisIndex);
+ IgnoreUnused(axisIndex);
BOOST_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
#include "Dequantize.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
namespace armnn
{
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
{
- boost::ignore_unused(outputInfo);
+ IgnoreUnused(outputInfo);
BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
float* detectionScores,
float* numDetections)
{
- boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+ IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
// Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
// which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
const int32_t* indices,
Encoder<float>& output)
{
- boost::ignore_unused(outputInfo);
+ IgnoreUnused(outputInfo);
const TensorShape& paramsShape = paramsInfo.GetShape();
unsigned int paramsProduct = 1;
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
BOOST_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
- boost::ignore_unused(axisIsValid);
+ IgnoreUnused(axisIsValid);
unsigned int uAxis = descriptor.m_Axis < 0 ?
numDimensions - boost::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
DataType outputDataType = outputInfo.GetDataType();
BOOST_ASSERT(inputDataType == outputDataType);
- boost::ignore_unused(outputDataType);
+ IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
m_Data.m_Parameters,
#include "Slice.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
- boost::ignore_unused(dim0);
+ IgnoreUnused(dim0);
for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
{
for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
// SPDX-License-Identifier: MIT
//
+#pragma once
+
#include "Packet.hpp"
-#include <cstdint>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <cstdint>
namespace armnn
{
namespace profiling
{
-#pragma once
-
class CommandHandlerFunctor
{
public:
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
namespace armnn
const Optional<uint16_t>& deviceUid,
const Optional<uint16_t>& counterSetUid)
{
- boost::ignore_unused(backendId);
+ IgnoreUnused(backendId);
// Check that the given parent category name is valid
if (parentCategoryName.empty() ||
bool FileOnlyProfilingConnection::WaitForStreamMeta(const unsigned char* buffer, uint32_t length)
{
- boost::ignore_unused(length);
+ IgnoreUnused(length);
// The first word, stream_metadata_identifer, should always be 0.
if (ToUint32(buffer, TargetEndianness::BeWire) != 0)
#include "PacketVersionResolver.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
Version PacketVersionResolver::ResolvePacketVersion(uint32_t familyId, uint32_t packetId) const
{
- boost::ignore_unused(familyId, packetId);
+ IgnoreUnused(familyId, packetId);
// NOTE: For now every packet specification is at version 1.0.0
return Version(1, 0, 0);
}
#include <atomic>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
bool IsOneOfStates(ProfilingState state1)
{
- boost::ignore_unused(state1);
+ IgnoreUnused(state1);
return false;
}
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
#include <Processes.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <cstring>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <cstring>
#include <ProfilingService.hpp>
#include <Runtime.hpp>
#include <Filesystem.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>
#include "../ProfilingConnectionDumpToFileDecorator.hpp"
#include <Runtime.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <fstream>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>
bool WritePacket(const unsigned char* buffer, uint32_t length) override
{
- boost::ignore_unused(buffer);
- boost::ignore_unused(length);
+ armnn::IgnoreUnused(buffer);
+ armnn::IgnoreUnused(length);
return true;
}
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ armnn::IgnoreUnused(timeout);
return std::move(*m_Packet);
}
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
// Simulate a delay in the reading process. The default timeout is way too long.
std::this_thread::sleep_for(std::chrono::milliseconds(5));
public:
IProfilingConnectionPtr GetProfilingConnection(const ExternalProfilingOptions& options) const override
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
return std::make_unique<MockProfilingConnection>();
}
};
void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory) override
{
- boost::ignore_unused(counterDirectory);
+ IgnoreUnused(counterDirectory);
std::string message("SendCounterDirectoryPacket");
unsigned int reserved = 0;
void SendPeriodicCounterCapturePacket(uint64_t timestamp,
const std::vector<CounterValue>& values) override
{
- boost::ignore_unused(timestamp, values);
+ IgnoreUnused(timestamp, values);
std::string message("SendPeriodicCounterCapturePacket");
unsigned int reserved = 0;
void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
const std::vector<uint16_t>& selectedCounterIds) override
{
- boost::ignore_unused(capturePeriod, selectedCounterIds);
+ IgnoreUnused(capturePeriod, selectedCounterIds);
std::string message("SendPeriodicCounterSelectionPacket");
unsigned int reserved = 0;
const armnn::Optional<uint16_t>& deviceUid = armnn::EmptyOptional(),
const armnn::Optional<uint16_t>& counterSetUid = armnn::EmptyOptional())
{
- boost::ignore_unused(backendId);
+ IgnoreUnused(backendId);
// Get the number of cores from the argument only
uint16_t deviceCores = numberOfCores.has_value() ? numberOfCores.value() : 0;
const Device* GetDevice(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
const CounterSet* GetCounterSet(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
const Counter* GetCounter(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
#include <armnn/Types.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
{
bool IsCounterRegistered(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return true;
}
uint16_t GetCounterCount() const override
}
uint32_t GetCounterValue(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return 0;
}
};
//not used
bool IsCounterRegistered(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return false;
}
bool WritePacket(const unsigned char* buffer, uint32_t length) override
{
- boost::ignore_unused(buffer, length);
+ IgnoreUnused(buffer, length);
return false;
}
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
++m_ReadRequests;
throw armnn::Exception("Simulate a non-timeout error");
}
public:
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
// Connection Acknowledged Packet header (word 0, word 1 is always zero):
// 26:31 [6] packet_family: Control Packet Family, value 0b000000
// 16:25 [10] packet_id: Packet identifier, value 0b0000000001
void operator()(const Packet& packet) override
{
- boost::ignore_unused(packet);
+ IgnoreUnused(packet);
m_Count++;
}
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
#include "InferenceTest.hpp"
#include "DeepSpeechV1Database.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
BOOST_ASSERT(output1.size() == k_OutputSize1);
//
#pragma once
+#include "InferenceModel.hpp"
+
#include <armnn/ArmNN.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
-#include "InferenceModel.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/program_options.hpp>
virtual void AddCommandLineOptions(boost::program_options::options_description& options)
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
};
virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions)
{
- boost::ignore_unused(commonOptions);
+ IgnoreUnused(commonOptions);
return true;
};
virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0;
void operator()(const std::vector<int>& values)
{
- boost::ignore_unused(values);
+ IgnoreUnused(values);
BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
//
#include "InferenceTestImage.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <array>
return GetImageDataInArmNnLayoutAsFloats(layout, image,
[](ImageChannel channel, float value)
{
- boost::ignore_unused(channel);
+ armnn::IgnoreUnused(channel);
return value / 255.f;
});
}
#include "InferenceTest.hpp"
#include "MobileNetSsdDatabase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
BOOST_ASSERT(output1.size() == k_OutputSize1);
const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
const bool printIntermediate, bool enableLayerDetails = false, bool parseUnuspported = false)
{
- boost::ignore_unused(runtime);
+ IgnoreUnused(runtime);
std::string modelFormat;
std::string modelPath;
std::string inputNames;
#include "InferenceTest.hpp"
#include "YoloDatabase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <algorithm>
#include <array>
#include <utility>
virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
using Boost3dArray = boost::multi_array<float, 3>;