#ifndef NDEBUG
# define ARMNN_ASSERT(COND) assert(COND)
-# define ARMNN_ASSERT_MSG(COND, MSG) assert(COND && MSG)
+# define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && MSG)
#else
# define ARMNN_ASSERT(COND)
# define ARMNN_ASSERT_MSG(COND, MSG)
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnnUtils
{
unsigned int batchIndex, unsigned int channelIndex,
unsigned int heightIndex, unsigned int widthIndex) const
{
- BOOST_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
- BOOST_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
+ ARMNN_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
+ ARMNN_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
( shape[m_ChannelsIndex] == 0 && channelIndex == 0) );
- BOOST_ASSERT( heightIndex < shape[m_HeightIndex] ||
+ ARMNN_ASSERT( heightIndex < shape[m_HeightIndex] ||
( shape[m_HeightIndex] == 0 && heightIndex == 0) );
- BOOST_ASSERT( widthIndex < shape[m_WidthIndex] ||
+ ARMNN_ASSERT( widthIndex < shape[m_WidthIndex] ||
( shape[m_WidthIndex] == 0 && widthIndex == 0) );
/// Offset the given indices appropriately depending on the data layout
#include <armnn/TypesUtils.hpp>
-#include <boost/assert.hpp>
-
namespace armnnUtils
{
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
#include "armnn/Descriptors.hpp"
#include "armnn/Logging.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <algorithm>
#include <array>
#include <vector>
// Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
void OriginsDescriptor::ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering)
{
- BOOST_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
+ ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
"elements in the new ordering array");
std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <unordered_map>
Status Graph::AllocateDynamicBuffers()
{
// Layers must be sorted in topological order
- BOOST_ASSERT(m_LayersInOrder);
+ ARMNN_ASSERT(m_LayersInOrder);
std::unordered_set<const ITensorHandle*> preallocatedTensors;
std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
auto MayNeedCompatibilityLayer = [](const Layer& layer)
{
// All layers should have been associated with a valid compute device at this point.
- BOOST_ASSERT(layer.GetBackendId() != Compute::Undefined);
+ ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
// Does not need another compatibility layer if a copy or import layer is already present.
return layer.GetType() != LayerType::MemCopy &&
layer.GetType() != LayerType::MemImport;
ForEachLayer([this, &backends, ®istry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
if (!MayNeedCompatibilityLayer(*srcLayer))
{
for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
{
InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
- BOOST_ASSERT(dstInputSlot);
+ ARMNN_ASSERT(dstInputSlot);
EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
- BOOST_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
+ ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
"Undefined memory strategy found while adding copy layers for compatibility");
const Layer& dstLayer = dstInputSlot->GetOwningLayer();
}
else
{
- BOOST_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
+ ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
}
void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- BOOST_ASSERT(substituteLayer != nullptr);
+ ARMNN_ASSERT(substituteLayer != nullptr);
ReplaceSubgraphConnections(subgraph, substituteLayer);
EraseSubgraphLayers(subgraph);
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectableLayer* substituteLayer)
{
- BOOST_ASSERT(substituteLayer != nullptr);
+ ARMNN_ASSERT(substituteLayer != nullptr);
// Create a new sub-graph with only the given layer, using
// the given sub-graph as a reference of which parent graph to use
void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
{
- BOOST_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+ ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
IgnoreUnused(layer);
- BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
+ ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
- BOOST_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
- BOOST_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
+ ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
+ ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
// Disconnect the sub-graph and replace it with the substitute sub-graph
for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
{
InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
- BOOST_ASSERT(subgraphInputSlot);
+ ARMNN_ASSERT(subgraphInputSlot);
IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
- BOOST_ASSERT(connectedOutputSlot);
+ ARMNN_ASSERT(connectedOutputSlot);
connectedOutputSlot->Disconnect(*subgraphInputSlot);
IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
- BOOST_ASSERT(substituteInputSlot);
+ ARMNN_ASSERT(substituteInputSlot);
connectedOutputSlot->Connect(*substituteInputSlot);
}
for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
{
OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
- BOOST_ASSERT(subgraphOutputSlot);
+ ARMNN_ASSERT(subgraphOutputSlot);
OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
- BOOST_ASSERT(substituteOutputSlot);
+ ARMNN_ASSERT(substituteOutputSlot);
subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
}
}
#include <armnn/TensorFwd.hpp>
#include <armnn/NetworkFwd.hpp>
#include <armnn/Exceptions.hpp>
+#include <armnn/utility/Assert.hpp>
#include <list>
#include <map>
#include <unordered_set>
#include <vector>
-#include <boost/assert.hpp>
#include <boost/iterator/transform_iterator.hpp>
namespace armnn
otherLayer->Reparent(*this, m_Layers.end());
});
- BOOST_ASSERT(other.m_PosInGraphMap.empty());
- BOOST_ASSERT(other.m_Layers.empty());
+ ARMNN_ASSERT(other.m_PosInGraphMap.empty());
+ ARMNN_ASSERT(other.m_Layers.empty());
return *this;
}
const size_t numErased = graph.m_PosInGraphMap.erase(this);
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
protected:
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
};
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
IgnoreUnused(numErased);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
}
};
inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
{
auto it = m_PosInGraphMap.find(&layer);
- BOOST_ASSERT(it != m_PosInGraphMap.end());
+ ARMNN_ASSERT(it != m_PosInGraphMap.end());
return it->second;
}
const Iterator pos = std::next(GetPosInGraph(owningLayer));
LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
- BOOST_ASSERT(layer->GetNumInputSlots() == 1);
+ ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
insertAfter.MoveAllConnections(layer->GetOutputSlot());
insertAfter.Connect(layer->GetInputSlot(0));
template <typename LayerT>
inline void Graph::EraseLayer(LayerT*& layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
EraseLayer(GetPosInGraph(*layer));
layer = nullptr;
}
#include "InternalTypes.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
case LayerType::Transpose: return "Transpose";
default:
- BOOST_ASSERT_MSG(false, "Unknown layer type");
+ ARMNN_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
}
}
void InputSlot::Insert(Layer& layer)
{
- BOOST_ASSERT(layer.GetNumOutputSlots() == 1);
+ ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
OutputSlot* const prevSlot = GetConnectedOutputSlot();
prevSlot->Disconnect(*this);
// Connects inserted layer to parent.
- BOOST_ASSERT(layer.GetNumInputSlots() == 1);
+ ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
int idx = prevSlot->Connect(layer.GetInputSlot(0));
prevSlot->SetEdgeStrategy(boost::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
{
- BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
+ ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
return shape == m_OutputHandler.GetTensorInfo().GetShape();
}
{
while (GetNumConnections() > 0)
{
- BOOST_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
+ ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
"Cannot move connections once memory strategies have be established.");
InputSlot& connection = *GetConnection(0);
return i;
}
}
- BOOST_ASSERT_MSG(false, "Did not find slot on owner.");
+ ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
return 0; // Error
}
for (auto&& inputSlot : GetInputSlots())
{
// The graph must be well-formed at this point.
- BOOST_ASSERT(inputSlot.GetConnection());
+ ARMNN_ASSERT(inputSlot.GetConnection());
const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
}
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
}
}
void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
{
- BOOST_ASSERT(GetNumInputSlots() == expectedConnections);
+ ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
for (unsigned int i=0; i<expectedConnections; ++i)
{
std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(GetNumInputSlots() != 0);
- BOOST_ASSERT(GetNumOutputSlots() != 0);
+ ARMNN_ASSERT(GetNumInputSlots() != 0);
+ ARMNN_ASSERT(GetNumOutputSlots() != 0);
// By default we return what we got, meaning the output shape(s) are the same as the input(s).
// This only works if the number of inputs and outputs are the same. Since we are in the Layer
#include <armnn/backends/IBackendInternal.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
#include <algorithm>
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- BOOST_ASSERT(inputs.size() > 0);
+ ARMNN_ASSERT(inputs.size() > 0);
FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
}
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- BOOST_ASSERT(inputs.size() > 0);
+ ARMNN_ASSERT(inputs.size() > 0);
ARMNN_NO_DEPRECATE_WARN_BEGIN
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <LabelsAndEventClasses.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
namespace armnn
for (auto&& input : layer.GetInputSlots())
{
const IOutputSlot* source = input.GetConnectedOutputSlot();
- BOOST_ASSERT(source != NULL);
+ ARMNN_ASSERT(source != NULL);
timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
source->GetOwningLayerGuid(),
layer.GetGuid());
{
for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
{
- BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
if (inputLayer->GetBindingId() == layerId)
{
return inputLayer->GetOutputSlot(0).GetTensorInfo();
{
for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
{
- BOOST_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
- BOOST_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
+ ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
+ ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
if (outputLayer->GetBindingId() == layerId)
{
return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
workloadFactory = it->second.first.get();
- BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
+ ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
std::string reasonIfUnsupported;
- BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
+ ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
"Factory does not support layer");
IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
info.m_InputTensorInfos.push_back(tensorInfo);
- BOOST_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
+ ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
const OutputHandler& handler = layer.GetOutputHandler();
const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
ITensorHandle* outputTensorHandle = handler.GetData();
- BOOST_ASSERT_MSG(outputTensorHandle != nullptr,
+ ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
"Data should have been allocated.");
inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
info.m_OutputTensorInfos.push_back(outputTensorInfo);
// Create a mem copy workload for input since we did not import
std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
- BOOST_ASSERT_MSG(inputWorkload, "No input workload created");
+ ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
info.m_OutputTensorInfos.push_back(tensorInfo);
- BOOST_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
+ ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
// Gets the output handler from the previous node.
const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
ITensorHandle* inputTensorHandle = outputHandler.GetData();
- BOOST_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+ ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
// Try import the output tensor.
// Note: We can only import the output pointer if all of the following hold true:
syncDesc.m_Inputs.push_back(inputTensorHandle);
info.m_InputTensorInfos.push_back(inputTensorInfo);
auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
- BOOST_ASSERT_MSG(syncWorkload, "No sync workload created");
+ ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
m_OutputQueue.push_back(move(syncWorkload));
}
else
std::unique_ptr<IWorkload> outputWorkload =
std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
- BOOST_ASSERT_MSG(outputWorkload, "No output workload created");
+ ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
std::unique_ptr<TimelineUtilityMethods> timelineUtils =
TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
#include <armnn/Logging.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
#if defined(_MSC_VER)
#ifndef NOMINMAX
#include <android/log.h>
#endif
-#include <boost/assert.hpp>
#include <iostream>
namespace armnn
SimpleLogger<LogSeverity::Fatal>::Get().Enable(true);
break;
default:
- BOOST_ASSERT(false);
+ ARMNN_ASSERT(false);
}
}
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ProfilingService.hpp>
#include <vector>
#include <algorithm>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/converter_policies.hpp>
#include <boost/cast.hpp>
}
else
{
- BOOST_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
+ ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
}
}
}
{
auto backendFactory = backendRegistry.GetFactory(selectedBackend);
auto backendObjPtr = backendFactory();
- BOOST_ASSERT(backendObjPtr);
+ ARMNN_ASSERT(backendObjPtr);
backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
BackendsMap& backends,
Optional<std::vector<std::string>&> errMessages)
{
- BOOST_ASSERT(optNetObjPtr);
+ ARMNN_ASSERT(optNetObjPtr);
OptimizationResult result;
for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
{
auto backendObjPtr = backends.find(selectedBackend)->second.get();
- BOOST_ASSERT(backendObjPtr);
+ ARMNN_ASSERT(backendObjPtr);
// Select sub-graphs based on backend
SubgraphViewSelector::Subgraphs subgraphs =
{
// Try to optimize the current sub-graph
OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
- BOOST_ASSERT(optimizationViews.Validate(*subgraph));
+ ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
// Optimization attempted, check the resulting optimized sub-graph
for (auto& substitution : optimizationViews.GetSubstitutions())
// Assign the current backend to the optimized sub-graph
std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
{
- BOOST_ASSERT(l);
+ ARMNN_ASSERT(l);
l->SetBackendId(selectedBackend);
});
}
TensorHandleFactoryRegistry& registry)
{
Layer& layer = slot.GetOwningLayer();
- BOOST_ASSERT(layer.GetType() == LayerType::Input);
+ ARMNN_ASSERT(layer.GetType() == LayerType::Input);
// Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
// doesn't matter which backend it is assigned to because they all use the same implementation, which
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
{
const Layer& connectedLayer = connection->GetOwningLayer();
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
for (auto&& src : srcPrefs)
TensorHandleFactoryRegistry& registry)
{
auto toBackend = backends.find(connectedLayer.GetBackendId());
- BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+ ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
optGraph.ForEachLayer([&backends, ®istry, &result, &errMessages](Layer* layer)
{
- BOOST_ASSERT(layer);
+ ARMNN_ASSERT(layer);
// Lets make sure the backend is in our list of supported backends. Something went wrong during backend
// assignment if this check fails
- BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
+ ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
// Check each output separately
for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
{
auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
auto backendPtr = factoryFun();
- BOOST_ASSERT(backendPtr.get() != nullptr);
+ ARMNN_ASSERT(backendPtr.get() != nullptr);
ARMNN_NO_DEPRECATE_WARN_BEGIN
auto backendSpecificOptimizations = backendPtr->GetOptimizations();
}
break;
default:
- BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
+ ARMNN_ASSERT_MSG(false, "Can't quantize unsupported data type");
}
TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/ILayerVisitor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <utility>
#include <limits>
-#include <boost/assert.hpp>
-
namespace armnn
{
template<typename srcType>
void QuantizeConstant(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
{
- BOOST_ASSERT(src);
- BOOST_ASSERT(dst);
+ ARMNN_ASSERT(src);
+ ARMNN_ASSERT(dst);
float min = std::numeric_limits<srcType>::max();
float max = std::numeric_limits<srcType>::lowest();
graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str());
// Sets output tensor info for the debug layer.
- BOOST_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
+ ARMNN_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
--it;
for (auto&& optimization : optimizations)
{
- BOOST_ASSERT(*it);
+ ARMNN_ASSERT(*it);
optimization->Run(graph, **it);
if ((*it)->IsOutputUnconnected())
#include <backendsCommon/WorkloadDataCollector.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
#include <string>
#include <vector>
-#include <boost/assert.hpp>
-
namespace armnn
{
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
#include "Profiling.hpp"
#include <armnn/BackendId.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include "JsonPrinter.hpp"
Measurement FindMeasurement(const std::string& name, const Event* event)
{
- BOOST_ASSERT(event != nullptr);
+ ARMNN_ASSERT(event != nullptr);
// Search though the measurements.
for (const auto& measurement : event->GetMeasurements())
std::vector<Measurement> FindKernelMeasurements(const Event* event)
{
- BOOST_ASSERT(event != nullptr);
+ ARMNN_ASSERT(event != nullptr);
std::vector<Measurement> measurements;
{
event->Stop();
- BOOST_ASSERT(!m_Parents.empty());
- BOOST_ASSERT(event == m_Parents.top());
+ ARMNN_ASSERT(!m_Parents.empty());
+ ARMNN_ASSERT(event == m_Parents.top());
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
IgnoreUnused(parent);
- BOOST_ASSERT(event->GetParentEvent() == parent);
+ ARMNN_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
ANNOTATE_CHANNEL_END(uint32_t(m_Parents.size()));
JsonChildObject& parentObject,
std::map<const Event*, std::vector<const Event*>> descendantsMap)
{
- BOOST_ASSERT(parentEvent);
+ ARMNN_ASSERT(parentEvent);
std::vector<Measurement> instrumentMeasurements = parentEvent->GetMeasurements();
unsigned int childIdx=0;
for(size_t measurementIndex = 0; measurementIndex < instrumentMeasurements.size(); ++measurementIndex, ++childIdx)
measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
measurementObject.SetType(JsonObjectType::Measurement);
- BOOST_ASSERT(parentObject.NumChildren() == childIdx);
+ ARMNN_ASSERT(parentObject.NumChildren() == childIdx);
parentObject.AddChild(measurementObject);
}
void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
IConnectableLayer* quantizedLayer)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
{
const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
const InputSlot* inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
- BOOST_ASSERT(inputSlot);
+ ARMNN_ASSERT(inputSlot);
const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
- BOOST_ASSERT(outputSlot);
+ ARMNN_ASSERT(outputSlot);
unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
Layer& layerToFind = outputSlot->GetOwningLayer();
if (found == m_OriginalToQuantizedGuidMap.end())
{
// Error in graph traversal order
- BOOST_ASSERT_MSG(false, "Error in graph traversal");
+ ARMNN_ASSERT_MSG(false, "Error in graph traversal");
return;
}
const Optional<ConstTensor>& biases,
std::vector<int32_t>& backing)
{
- BOOST_ASSERT(srcLayer);
+ ARMNN_ASSERT(srcLayer);
const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
auto inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
- BOOST_ASSERT(inputSlot);
+ ARMNN_ASSERT(inputSlot);
const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
- BOOST_ASSERT(outputSlot);
+ ARMNN_ASSERT(outputSlot);
unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
Layer& layerToFind = outputSlot->GetOwningLayer();
if (found == m_OriginalToQuantizedGuidMap.end())
{
// Error in graph traversal order
- BOOST_ASSERT_MSG(false, "Error in graph traversal");
+ ARMNN_ASSERT_MSG(false, "Error in graph traversal");
return biases.value();
}
try {
auto factoryFun = BackendRegistryInstance().GetFactory(id);
auto backend = factoryFun();
- BOOST_ASSERT(backend.get() != nullptr);
+ ARMNN_ASSERT(backend.get() != nullptr);
auto context = backend->CreateBackendContext(options);
IgnoreUnused(errorMessage);
// Check if the item is valid
- BOOST_ASSERT_MSG(i, errorMessage.c_str());
+ ARMNN_ASSERT_MSG(i, errorMessage.c_str());
// Check if a duplicate has been found
- BOOST_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+ ARMNN_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
duplicateSet.insert(i);
});
#include "SubgraphViewSelector.hpp"
#include "Graph.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <algorithm>
#include <map>
#include <queue>
for (PartialSubgraph* a : m_Antecedents)
{
size_t numErased = a->m_Dependants.erase(this);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
IgnoreUnused(numErased);
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
- BOOST_ASSERT(numErased == 1);
+ ARMNN_ASSERT(numErased == 1);
IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
{
OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
- BOOST_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
+ ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
if (parentLayerOutputSlot)
{
Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
for (auto inputSlot : layer.GetInputSlots())
{
auto connectedInput = boost::polymorphic_downcast<OutputSlot*>(inputSlot.GetConnection());
- BOOST_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
+ ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
Layer& inputLayer = connectedInput->GetOwningLayer();
auto parentInfo = layerInfos.find(&inputLayer);
#include "armnn/Exceptions.hpp"
#include "armnn/TypesUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <sstream>
return 1.0f;
}
- BOOST_ASSERT(!HasMultipleQuantizationScales());
+ ARMNN_ASSERT(!HasMultipleQuantizationScales());
return m_Quantization.m_Scales[0];
}
// SPDX-License-Identifier: MIT
//
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
+ ARMNN_ASSERT(scale != 0.f);
+ ARMNN_ASSERT(!std::isnan(value));
float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
static_cast<float>(max));
float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
{
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!IsNan(value));
+ ARMNN_ASSERT(scale != 0.f);
+ ARMNN_ASSERT(!IsNan(value));
float dequantized = boost::numeric_cast<float>(value - offset) * scale;
return dequantized;
}
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"AbsLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
auto inputNumDimensions = inputShape.GetNumDimensions();
auto axis = m_Param.m_Axis;
auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
- BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+ ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
// 1D input shape results in scalar output
if (inputShape.GetNumDimensions() == 1)
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
- BOOST_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
- BOOST_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
- BOOST_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
+ ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
+ ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
+ ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
+ ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
BatchNormalizationQueueDescriptor descriptor;
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
1U,
std::multiplies<>());
- BOOST_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
+ ARMNN_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
outputShape[0] = inputShape[0] / accumulatedBlockShape;
unsigned int outputHeight = inputShape[heightIndex] * m_Param.m_BlockShape[0];
unsigned int outputWidth = inputShape[widthIndex] * m_Param.m_BlockShape[1];
- BOOST_ASSERT_MSG(heightCrop <= outputHeight,
+ ARMNN_ASSERT_MSG(heightCrop <= outputHeight,
"BatchToSpaceLayer: Overall height crop should be less than or equal to the uncropped output height.");
- BOOST_ASSERT_MSG(widthCrop <= outputWidth,
+ ARMNN_ASSERT_MSG(widthCrop <= outputWidth,
"BatchToSpaceLayer: Overall width crop should be less than or equal to the uncropped output width.");
outputShape[heightIndex] = outputHeight - heightCrop;
std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& input0 = inputShapes[0];
const TensorShape& input1 = inputShapes[1];
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
std::vector<unsigned int> dims(numDims);
unsigned int dim0 = input0[i];
unsigned int dim1 = input1[i];
- BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
"Dimensions should either match or one should be of size 1.");
dims[i] = std::max(dim0, dim1);
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ComparisonLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
OutputHandler& outputHandler = slot->GetOutputHandler();
- BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
+ ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
outputHandler.SetData(std::move(subTensor));
Layer& inputLayer = slot->GetOwningLayer();
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
CreateTensors(*handleFactory);
}
}
std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
unsigned int numDims = m_Param.GetNumDimensions();
for (unsigned int i=0; i< inputShapes.size(); i++)
auto inferredShapes = InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
Convolution2dQueueDescriptor descriptor;
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape filterShape = inputShapes[1];
// If we support multiple batch dimensions in the future, then this assert will need to change.
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
VerifyLayerConnections(1, CHECK_LOCATION());
// check if we m_Weight data is not nullptr
- BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DebugLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DepthToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
DepthwiseConvolution2dQueueDescriptor descriptor;
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
std::vector<TensorShape>
DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& filterShape = inputShapes[1];
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
VerifyLayerConnections(1, CHECK_LOCATION());
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DepthwiseConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
VerifyLayerConnections(2, CHECK_LOCATION());
// on this level constant data should not be released.
- BOOST_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+ ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
- BOOST_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+ ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
#include "InternalTypes.hpp"
#include "armnn/Exceptions.hpp"
#include <armnn/TypesUtils.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
auto& input0 = inputShapes[0];
auto& input1 = inputShapes[1];
// Get the max of the inputs.
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
std::vector<unsigned int> dims(numDims);
#if !NDEBUG
// Validate inputs are broadcast compatible.
- BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+ ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
"Dimensions should either match or one should be of size 1.");
#endif
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
std::string msg = GetLayerTypeAsCString(GetType());
msg += "Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.";
std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
// Should return the shape of the input tensor
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& input = inputShapes[0];
return std::vector<TensorShape>({ input });
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
// on this level constant data should not be released..
- BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
FullyConnectedQueueDescriptor descriptor;
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape weightShape = inputShapes[1];
VerifyLayerConnections(1, CHECK_LOCATION());
// check if we m_Weight data is not nullptr
- BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"InstanceNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
VerifyLayerConnections(1, CHECK_LOCATION());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"LogSoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()}
);
- BOOST_ASSERT(inferredShapes.size() == 4);
+ ARMNN_ASSERT(inferredShapes.size() == 4);
// Check if the weights are nullptr
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
"LstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
"LstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
"LstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
"LstmLayer: m_BasicParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
"LstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
"LstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
"LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
ConditionalThrowIfNotEqual<LayerValidationException>(
}
else
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
"LstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
"LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
ConditionalThrowIfNotEqual<LayerValidationException>(
if (m_Param.m_ProjectionEnabled)
{
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
"LstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
}
{
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
"when Peephole is enabled and CIFG is disabled.");
}
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
"LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
{
if(!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
}
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
"LstmLayer: m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
}
}
const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
- BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
+ ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
"MeanLayer: Mean supports up to 4D input.");
unsigned int rank = input.GetNumDimensions();
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MemImportLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
ConditionalThrowIfNotEqual<LayerValidationException>(
"MergeLayer: TensorShapes set on inputs do not match",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
// If we support multiple batch dimensions in the future, then this assert will need to change.
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
unsigned int outHeight = 1;
if (!isGlobalPooling)
{
- BOOST_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
+ ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
"Stride can only be zero when performing global pooling");
auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
size = static_cast<unsigned int>(floor(div)) + 1;
break;
default:
- BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+ ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
}
// MakeS sure that border operations will start from inside the input and not the padded area.
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& alphaShape = inputShapes[1];
const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
- BOOST_ASSERT(inputShapeDimensions > 0);
- BOOST_ASSERT(alphaShapeDimensions > 0);
+ ARMNN_ASSERT(inputShapeDimensions > 0);
+ ARMNN_ASSERT(alphaShapeDimensions > 0);
// The size of the output is the maximum size along each dimension of the input operands,
// it starts with the trailing dimensions, and works its way forward
unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
// Check that the inputs are broadcast compatible
- BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+ ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
"PreluLayer: Dimensions should either match or one should be of size 1");
outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int batchSize = inputShapes[0][0];
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousCellStateIn
});
- BOOST_ASSERT(inferredShapes.size() == 3);
+ ARMNN_ASSERT(inferredShapes.size() == 3);
// Check if the weights are nullptr for basic params
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
"QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
"QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
"QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
"QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
if (!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
"QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
"QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
ConditionalThrowIfNotEqual<LayerValidationException>(
}
else
{
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
"QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
"QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
"not have a value when CIFG is enabled.");
- BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+ ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
ConditionalThrowIfNotEqual<LayerValidationException>(
if (m_Param.m_ProjectionEnabled)
{
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
"QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
- BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
+ ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
"QLstmLayer: m_ProjectionParameters.m_ProjectionBias should not be null.");
}
if (m_Param.m_PeepholeEnabled)
{
if (!m_Param.m_CifgEnabled) {
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
"when Peephole is enabled and CIFG is disabled.");
}
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
"QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
}
{
if(!m_Param.m_CifgEnabled)
{
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
}
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
"QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
- BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
                 "QLstmLayer: m_LayerNormParameters.m_OutputLayerNormWeights should not be null.");
}
}
std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 3);
+ ARMNN_ASSERT(inputShapes.size() == 3);
// Get input values for validation
unsigned int numBatches = inputShapes[0][0];
GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() // previousOutputIn
});
- BOOST_ASSERT(inferredShapes.size() == 2);
+ ARMNN_ASSERT(inferredShapes.size() == 2);
// Check weights and bias for nullptr
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
- BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
+ ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
// Check output TensorShape(s) match inferred shape
auto inferredShapes = InferOutputShapes({ });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inputShape = inputShapes[0];
const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
IgnoreUnused(inputShapes);
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
TensorShape outputShape(inputShape);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
else
{
ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
- BOOST_ASSERT(handleFactory);
+ ARMNN_ASSERT(handleFactory);
CreateTensors(*handleFactory);
}
}
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
IgnoreUnused(inputShapes);
- BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
auto inferredShapes = InferOutputShapes(views);
- BOOST_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
+ ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
{
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
const unsigned int axis = m_Param.m_Axis;
- BOOST_ASSERT(axis <= inputNumDimensions);
+ ARMNN_ASSERT(axis <= inputNumDimensions);
std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
for (unsigned int i = 0; i < axis; ++i)
auto inferredShapes = InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
TensorShape inputShape = inputShapes[0];
std::vector<unsigned int> outputShape;
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.",
{
VerifyLayerConnections(2, CHECK_LOCATION());
- BOOST_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
+ ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
// Assuming first input is the Input and second input is the Constant
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 2);
+ ARMNN_ASSERT(inferredShapes.size() == 2);
ConditionalThrowIfNotEqual<LayerValidationException>(
"SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
TransposeConvolution2dQueueDescriptor descriptor;
descriptor.m_Weight = m_Weight.get();
if (m_Param.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+ ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
descriptor.m_Bias = m_Bias.get();
}
std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 2);
+ ARMNN_ASSERT(inputShapes.size() == 2);
const TensorShape& inputShape = inputShapes[0];
const TensorShape& kernelShape = inputShapes[1];
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
unsigned int kernelElements = kernelShape[0] * kernelShape[dataLayoutIndex.GetChannelsIndex()];
unsigned int inputElements = batches * inputShape[dataLayoutIndex.GetChannelsIndex()];
- BOOST_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
- BOOST_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
+ ARMNN_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
+ ARMNN_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
unsigned int channels = kernelElements / inputElements;
{
VerifyLayerConnections(1, CHECK_LOCATION());
- BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+ ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
m_Weight->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- BOOST_ASSERT(inputShapes.size() == 1);
+ ARMNN_ASSERT(inputShapes.size() == 1);
const TensorShape& inShape = inputShapes[0];
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- BOOST_ASSERT(inferredShapes.size() == 1);
+ ARMNN_ASSERT(inferredShapes.size() == 1);
ConditionalThrowIfNotEqual<LayerValidationException>(
"TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Pad);
- BOOST_ASSERT(child.GetType() == LayerType::Convolution2d);
+ ARMNN_ASSERT(base.GetType() == LayerType::Pad);
+ ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d);
PadLayer* padLayer = boost::polymorphic_downcast<PadLayer*>(&base);
Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast<Convolution2dLayer*>(&child);
newConv2dLayer.GetOutputHandler().SetTensorInfo(outInfo);
// Copy weights and bias to the new convolution layer
- BOOST_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
"FoldPadIntoConvolution2d: Weights data should not be null.");
newConv2dLayer.m_Weight = std::move(convolution2dLayer->m_Weight);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
+ ARMNN_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
"FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
newConv2dLayer.m_Bias = std::move(convolution2dLayer->m_Bias);
}
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Reshape);
- BOOST_ASSERT(child.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(base.GetType() == LayerType::Reshape);
+ ARMNN_ASSERT(child.GetType() == LayerType::Reshape);
OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
+ ARMNN_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
child.GetType() == LayerType::ConvertFp32ToFp16) ||
(base.GetType() == LayerType::ConvertFp32ToFp16 &&
child.GetType() == LayerType::ConvertFp16ToFp32));
{
// Validate base layer (the Permute) is compatible
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
- BOOST_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
+ ARMNN_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
const TensorInfo& inputInfo = base.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& intermediateInfo = base.GetOutputSlot(0).GetTensorInfo();
if (intermediateInfo.GetNumDimensions() != 4)
// Validate child layer (the BatchToSpace) is compatible
Layer& child = connection.GetOwningLayer();
- BOOST_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
+ ARMNN_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
const TensorInfo& outputInfo = child.GetOutputSlot(0).GetTensorInfo();
const BatchToSpaceNdDescriptor& batchToSpaceDesc = static_cast<BatchToSpaceNdLayer&>(child).GetParameters();
if (batchToSpaceDesc.m_DataLayout != DataLayout::NHWC)
{
if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
}
}
{
if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
}
else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
}
else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
{
- BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
- BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+ ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+ ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
}
}
{
for (auto&& inputLayer : network->GetGraph().GetInputLayers())
{
- BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+ ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
return inputLayer->GetOutputSlot(0).GetTensorInfo();
}
throw InvalidArgumentException("Network has no input layers");
#pragma once
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <QuantizeHelper.hpp>
-#include <boost/assert.hpp>
#include <boost/multi_array.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/random/uniform_real_distribution.hpp>
template <typename T, std::size_t n>
boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat)
{
- BOOST_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
+ ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
std::array<unsigned int, n> shape;
#include "TestUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnn;
void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
unsigned int fromIndex, unsigned int toIndex)
{
- BOOST_ASSERT(from);
- BOOST_ASSERT(to);
+ ARMNN_ASSERT(from);
+ ARMNN_ASSERT(to);
from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
#include "GraphTopologicalSort.hpp"
#include "VerificationHelpers.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
// Caffe
void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
{
- BOOST_ASSERT(layerParam.type() == "Input");
+ ARMNN_ASSERT(layerParam.type() == "Input");
ValidateNumInputsOutputs(layerParam, 0, 1);
const InputParameter& param = layerParam.input_param();
unsigned int kernelW,
unsigned int kernelH)
{
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
    // assume these were already verified by the caller ParseConvLayer() function
- BOOST_ASSERT(numGroups < inputShape.dim(1));
- BOOST_ASSERT(numGroups > 1);
+ ARMNN_ASSERT(numGroups < inputShape.dim(1));
+ ARMNN_ASSERT(numGroups > 1);
// Handle grouping
armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
unsigned int kernelW,
unsigned int kernelH)
{
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
// Not Available ArmNN Interface Parameters
// * Rounding policy;
- BOOST_ASSERT(layerParam.type() == "Convolution");
+ ARMNN_ASSERT(layerParam.type() == "Convolution");
ValidateNumInputsOutputs(layerParam, 1, 1);
ConvolutionParameter convParam = layerParam.convolution_param();
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ParserHelper.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
{
m_Network = INetwork::Create();
- BOOST_ASSERT(graph != nullptr);
+ ARMNN_ASSERT(graph != nullptr);
unsigned int layerIndex = 0;
for (AnyLayer const* layer : *graph->layers())
{
// GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
LayerBindingId bindingId = GetBindingLayerInfo(graph, inputLayerIndex);
- BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+ ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
IConnectableLayer* inputLayer =
m_Network->AddInputLayer(bindingId, baseLayer->layerName()->c_str());
// GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
LayerBindingId bindingId = GetBindingLayerInfo(graph, outputLayerIndex);
- BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+ ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Output has no name.");
IConnectableLayer* outputLayer =
m_Network->AddOutputLayer(bindingId, baseLayer->layerName()->c_str());
IConnectableLayer* layer)
{
CHECK_LAYERS(graph, 0, layerIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->outputSlots()->size() != layer->GetNumOutputSlots())
{
armnn::IConnectableLayer* layer)
{
CHECK_LAYERS(graph, 0, layerIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
if (baseLayer->inputSlots()->size() != layer->GetNumInputSlots())
{
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
+ ARMNN_ASSERT_MSG(false, "Unsupported pooling algorithm");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
+ ARMNN_ASSERT_MSG(false, "Unsupported output shape rounding");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported padding method");
+ ARMNN_ASSERT_MSG(false, "Unsupported padding method");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported data layout");
+ ARMNN_ASSERT_MSG(false, "Unsupported data layout");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported normalization channel type");
+ ARMNN_ASSERT_MSG(false, "Unsupported normalization channel type");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported normalization method type");
+ ARMNN_ASSERT_MSG(false, "Unsupported normalization method type");
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported data layout");
+ ARMNN_ASSERT_MSG(false, "Unsupported data layout");
}
}
#include <ArmnnSchema_generated.h>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
flatbuffers::Parser parser;
bool ok = parser.Parse(schemafile.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
ok &= parser.Parse(m_JsonString.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
if (!ok)
{
#include "OnnxParser.hpp"
#include <armnn/Descriptors.hpp>
+#include <armnn/utility/Assert.hpp>
#include <VerificationHelpers.hpp>
#include <boost/format.hpp>
const IConnectableLayer* layer,
std::vector<TensorShape> inputShapes)
{
- BOOST_ASSERT(! outNames.empty());
+ ARMNN_ASSERT(! outNames.empty());
bool needCompute = std::any_of(outNames.begin(),
outNames.end(),
[this](std::string name)
if(needCompute)
{
inferredShapes = layer->InferOutputShapes(inputShapes);
- BOOST_ASSERT(inferredShapes.size() == outNames.size());
+ ARMNN_ASSERT(inferredShapes.size() == outNames.size());
}
for (uint i = 0; i < outNames.size(); ++i)
{
void OnnxParser::LoadGraph()
{
- BOOST_ASSERT(m_Graph.get() != nullptr);
+ ARMNN_ASSERT(m_Graph.get() != nullptr);
//Fill m_TensorsInfo with the shapes and value of every tensor
SetupInfo(m_Graph->mutable_output());
CreateConstTensor(weightName).first,
Optional<ConstTensor>(CreateConstTensor(biasName).first),
matmulNode.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
{m_TensorsInfo[inputName].m_info->GetShape(),
CreateConstTensor(weightName).first,
EmptyOptional(),
matmulNode.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
{m_TensorsInfo[inputName].m_info->GetShape(),
desc.m_PoolHeight = inputShape[2];
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
}
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// register the input connection slots for the layer, connections are made after all layers have been created
}
IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
{
- BOOST_ASSERT(node.op_type() == "Conv");
+ ARMNN_ASSERT(node.op_type() == "Conv");
DepthwiseConvolution2dDescriptor desc;
desc.m_PadLeft = convDesc.m_PadLeft;
EmptyOptional(),
node.name().c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
EmptyOptional(),
node.name().c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[node.input(0)].m_info->GetShape(),
auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
auto input0 = *m_TensorsInfo[inputs.first].m_info;
auto input1 = *m_TensorsInfo[inputs.second].m_info;
- BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+ ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
unsigned int numDims = input0.GetNumDimensions();
for (unsigned int i = 0; i < numDims; i++)
IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
{ m_TensorsInfo[inputs.first].m_info->GetShape(),
biasTensor.first,
scaleTensor.first,
node.name().c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIds.size() != layer->GetNumInputSlots())
{
throw ParseException(
void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIds.size() != layer->GetNumOutputSlots())
{
throw ParseException(
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
// armnnUtils:
#include <flatbuffers/flexbuffers.h>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem.hpp>
{
// not checking model, because I assume CHECK_MODEL already run
// and checked that. An assert would do.
- BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
+ ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
// also subgraph index should be checked by CHECK_MODEL so
// I only add an assert here
- BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
+ ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
// the tensor index is the only one to check here
if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
armnn::Optional<armnn::PermutationVector&> permutationVector)
{
IgnoreUnused(tensorPtr);
- BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
- BOOST_ASSERT_MSG(bufferPtr != nullptr,
+ ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
+ ARMNN_ASSERT_MSG(bufferPtr != nullptr,
boost::str(
boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
IConnectableLayer *layer)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
- BOOST_ASSERT(operatorPtr->inputs.size() > 1);
+ ARMNN_ASSERT(operatorPtr->inputs.size() > 1);
uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
INetworkPtr TfLiteParser::CreateNetworkFromModel()
{
m_Network = INetwork::Create();
- BOOST_ASSERT(m_Model.get() != nullptr);
+ ARMNN_ASSERT(m_Model.get() != nullptr);
bool failedToCreate = false;
std::stringstream errors;
armnn::IOutputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+ ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+ ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
armnn::IInputSlot* slot)
{
CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
- BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
- BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+ ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+ ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
tensorSlots.inputSlots.push_back(slot);
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
EmptyOptional(),
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
EmptyOptional(),
layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
break;
default:
- BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
+ ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
}
Pooling2dDescriptor desc;
IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
EmptyOptional(),
layerName.c_str());
}
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// The model does not specify the output shapes.
// The output shapes are calculated from the max_detection and max_classes_per_detection.
auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
- BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
+ ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
const unsigned int splitDim = axisData[0];
auto inputDimSize = inputTensorInfo.GetNumDimensions();
const std::vector<unsigned int>& tensorIndexes)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIndexes.size() != layer->GetNumInputSlots())
{
throw ParseException(
const std::vector<unsigned int>& tensorIndexes)
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
if (tensorIndexes.size() != layer->GetNumOutputSlots())
{
throw ParseException(
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <test/TensorHelpers.hpp>
#include <boost/filesystem.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include "flatbuffers/idl.h"
flatbuffers::Parser parser;
bool ok = parser.Parse(schemafile.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
ok &= parser.Parse(m_JsonString.c_str());
- BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+ ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
if (!ok)
{
#include "../TfLiteParser.hpp"
#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/utility/Assert.hpp>
#include <layers/StandInLayer.hpp>
-#include <boost/assert.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/test/unit_test.hpp>
, m_StandInLayerVerifier(inputInfos, outputInfos)
{
const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
- BOOST_ASSERT(numInputs > 0);
+ ARMNN_ASSERT(numInputs > 0);
const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
- BOOST_ASSERT(numOutputs > 0);
+ ARMNN_ASSERT(numOutputs > 0);
m_JsonString = R"(
{
IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
{
- BOOST_ASSERT(m_Layer);
+ ARMNN_ASSERT(m_Layer);
// Assumes one-to-one mapping between Tf and armnn output slots.
unsigned int armnnOutputSlotIdx = tfOutputIndex;
if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
{
- BOOST_ASSERT(m_Representative);
+ ARMNN_ASSERT(m_Representative);
return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
}
m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
m_TensorInfo(tensorInfo)
{
- BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
+ ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
}
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
}
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
IgnoreUnused(graphDef);
- BOOST_ASSERT(nodeDef.op() == "Const");
+ ARMNN_ASSERT(nodeDef.op() == "Const");
if (nodeDef.attr().count("value") == 0)
{
TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
- BOOST_ASSERT(nodeDef.op() == "ExpandDims");
+ ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
if (inputTensorInfo.GetNumDimensions() > 4) {
throw ParseException(
size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
- BOOST_ASSERT(inputs.size() == 2);
- BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
- BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
- BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
+ ARMNN_ASSERT(inputs.size() == 2);
+ ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
+ ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
+ ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
{
IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
{
- BOOST_ASSERT(outputOfLeakyRelu != nullptr);
+ ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
const auto desc = TransposeDescriptor(permutationVector);
auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
- BOOST_ASSERT(layer);
+ ARMNN_ASSERT(layer);
input0Slot->Connect(layer->GetInputSlot(0));
TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
{
- BOOST_ASSERT(nodeDef.op() == "Squeeze");
+ ARMNN_ASSERT(nodeDef.op() == "Squeeze");
tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
DataType type;
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
}
};
void CreateLayerDeferred() override
{
- BOOST_ASSERT(m_Layer == nullptr);
+ ARMNN_ASSERT(m_Layer == nullptr);
m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
}
};
}
layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
// SPDX-License-Identifier: MIT
//
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/test/unit_test.hpp>
#include "armnnTfParser/ITfParser.hpp"
{
AddNFixture(const std::vector<armnn::TensorShape> inputShapes, unsigned int numberOfInputs)
{
- BOOST_ASSERT(inputShapes.size() == numberOfInputs);
+ ARMNN_ASSERT(inputShapes.size() == numberOfInputs);
m_Prototext = "";
for (unsigned int i = 0; i < numberOfInputs; i++)
{
"} \n");
// Manual height computation based on stride parameter.
- BOOST_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
+ ARMNN_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
std::array<unsigned int, 4> dims;
if (dataLayout == "NHWC")
{
#include "DotSerializer.hpp"
-#include <boost/assert.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <sstream>
#include <cstring>
#include "BFloat16.hpp"
#include "Half.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnnUtils
{
size_t numElements,
void* dstFloat16Buffer)
{
- BOOST_ASSERT(srcFloat32Buffer != nullptr);
- BOOST_ASSERT(dstFloat16Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat16Buffer != nullptr);
armnn::Half* pHalf = reinterpret_cast<armnn::Half*>(dstFloat16Buffer);
size_t numElements,
float* dstFloat32Buffer)
{
- BOOST_ASSERT(srcFloat16Buffer != nullptr);
- BOOST_ASSERT(dstFloat32Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat16Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat32Buffer != nullptr);
const armnn::Half* pHalf = reinterpret_cast<const armnn::Half*>(srcFloat16Buffer);
size_t numElements,
void* dstBFloat16Buffer)
{
- BOOST_ASSERT(srcFloat32Buffer != nullptr);
- BOOST_ASSERT(dstBFloat16Buffer != nullptr);
+ ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+ ARMNN_ASSERT(dstBFloat16Buffer != nullptr);
armnn::BFloat16* bf16 = reinterpret_cast<armnn::BFloat16*>(dstBFloat16Buffer);
size_t numElements,
float* dstFloat32Buffer)
{
- BOOST_ASSERT(srcBFloat16Buffer != nullptr);
- BOOST_ASSERT(dstFloat32Buffer != nullptr);
+ ARMNN_ASSERT(srcBFloat16Buffer != nullptr);
+ ARMNN_ASSERT(dstFloat32Buffer != nullptr);
const armnn::BFloat16* bf16 = reinterpret_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
#pragma once
#include <armnn/Optional.hpp>
-#include <boost/assert.hpp>
#include <functional>
#include <map>
// Remove any preceding and trailing character specified in the characterSet.
std::string Strip(const std::string& originalString, const std::string& characterSet)
{
- BOOST_ASSERT(!characterSet.empty());
+ ARMNN_ASSERT(!characterSet.empty());
const std::size_t firstFound = originalString.find_first_not_of(characterSet);
const std::size_t lastFound = originalString.find_last_not_of(characterSet);
// Return empty if the originalString is empty or the originalString contains only to-be-striped characters
#include <algorithm>
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <cstddef>
#include <functional>
#include <armnnUtils/TensorUtils.hpp>
#include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
const unsigned int firstAxisInclusive,
const unsigned int lastAxisExclusive)
{
- BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive);
- BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
+ ARMNN_ASSERT(firstAxisInclusive <= lastAxisExclusive);
+ ARMNN_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
unsigned int count = 1;
for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
{
unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
{
- BOOST_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
+ ARMNN_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
+ ARMNN_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
{
unsigned int numDim = shape.GetNumDimensions();
- BOOST_ASSERT(axis <= numDim - 1);
+ ARMNN_ASSERT(axis <= numDim - 1);
unsigned int count = 1;
for (unsigned int i = axis; i < numDim; i++)
{
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
- BOOST_ASSERT(outputTensorInfo1.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo1.GetShape()[0] == 1);
+ BOOST_TEST(outputTensorInfo1.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo1.GetShape()[0] == 1);
// Reducing dimension 0 results in a 3x4 size tensor (one dimension)
std::set<unsigned int> axisData2 = { 0 };
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
- BOOST_ASSERT(outputTensorInfo2.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo2.GetShape()[0] == 12);
+ BOOST_TEST(outputTensorInfo2.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo2.GetShape()[0] == 12);
// Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
std::set<unsigned int> axisData3 = { 0, 1 };
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
- BOOST_ASSERT(outputTensorInfo3.GetNumDimensions() == 1);
- BOOST_ASSERT(outputTensorInfo3.GetShape()[0] == 4);
+ BOOST_TEST(outputTensorInfo3.GetNumDimensions() == 1);
+ BOOST_TEST(outputTensorInfo3.GetShape()[0] == 4);
// Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
keepDims = true;
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
- BOOST_ASSERT(outputTensorInfo4.GetNumDimensions() == 3);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[0] == 1);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[1] == 3);
- BOOST_ASSERT(outputTensorInfo4.GetShape()[2] == 4);
+ BOOST_TEST(outputTensorInfo4.GetNumDimensions() == 3);
+ BOOST_TEST(outputTensorInfo4.GetShape()[0] == 1);
+ BOOST_TEST(outputTensorInfo4.GetShape()[1] == 3);
+ BOOST_TEST(outputTensorInfo4.GetShape()[2] == 4);
// Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
keepDims = true;
CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5, keepDims, outputTensorInfo5);
- BOOST_ASSERT(outputTensorInfo5.GetNumDimensions() == 3);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[0] == 2);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[1] == 1);
- BOOST_ASSERT(outputTensorInfo5.GetShape()[2] == 1);
+ BOOST_TEST(outputTensorInfo5.GetNumDimensions() == 3);
+ BOOST_TEST(outputTensorInfo5.GetShape()[0] == 2);
+ BOOST_TEST(outputTensorInfo5.GetShape()[1] == 1);
+ BOOST_TEST(outputTensorInfo5.GetShape()[2] == 1);
}
using armnnUtils::ConvertInt32ToOctalString;
std::string octalString = ConvertInt32ToOctalString(1);
- BOOST_ASSERT(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
octalString = ConvertInt32ToOctalString(256);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
octalString = ConvertInt32ToOctalString(65536);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
octalString = ConvertInt32ToOctalString(16777216);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
octalString = ConvertInt32ToOctalString(-1);
- BOOST_ASSERT(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-256);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-65536);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
octalString = ConvertInt32ToOctalString(-16777216);
- BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
+ BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
}
BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
};
auto output_string = createAndConvert({5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 5\n"
"}"));
output_string = createAndConvert({4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 4\n"
"}\n"
));
output_string = createAndConvert({3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 3\n"
"}\n"
));
output_string = createAndConvert({2, 3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 2\n"
"}\n"
));
output_string = createAndConvert({1, 2, 3, 4, 5});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 1\n"
"}\n"
));
output_string = createAndConvert({0xffffffff, 0xffffffff});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 4294967295\n"
"}\n"
));
output_string = createAndConvert({1, 0});
- BOOST_ASSERT(output_string.compare(
+ BOOST_TEST(output_string.compare(
"dim {\n"
"size: 1\n"
"}\n"
case armnn::DataType::Signed32:
return arm_compute::DataType::S32;
default:
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
return arm_compute::DataType::UNKNOWN;
}
}
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/Types.h>
-#include <boost/assert.hpp>
-
namespace armnn
{
unsigned int dim = tensor.GetNumDimensions();
- BOOST_ASSERT(dim != 0);
+ ARMNN_ASSERT(dim != 0);
// Currently ArmNN support axis 1.
return dim - 1;
BaseMemoryManager::BaseMemoryManager(std::unique_ptr<arm_compute::IAllocator> alloc,
MemoryAffinity memoryAffinity)
{
- BOOST_ASSERT(alloc);
+ ARMNN_ASSERT(alloc);
m_Allocator = std::move(alloc);
m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
static const size_t s_NumPools = 1;
// Allocate memory pools for intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Allocate memory pools for inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
// Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->acquire();
}
void BaseMemoryManager::Release()
{
// Release inter-layer memory group. NOTE: This has to come before releasing the pools
- BOOST_ASSERT(m_InterLayerMemoryGroup);
+ ARMNN_ASSERT(m_InterLayerMemoryGroup);
m_InterLayerMemoryGroup->release();
// Release memory pools managed by intra-layer memory manager
- BOOST_ASSERT(m_IntraLayerMemoryMgr);
+ ARMNN_ASSERT(m_IntraLayerMemoryMgr);
m_IntraLayerMemoryMgr->clear();
// Release memory pools managed by inter-layer memory manager
- BOOST_ASSERT(m_InterLayerMemoryMgr);
+ ARMNN_ASSERT(m_InterLayerMemoryMgr);
m_InterLayerMemoryMgr->clear();
}
#else
void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
{
- BOOST_ASSERT(GetTensor<void>() == nullptr);
- BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
+ ARMNN_ASSERT(GetTensor<void>() == nullptr);
+ ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
if (srcMemory)
{
#include <algorithm>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
template <typename T>
const T* GetConstTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<const T*>(m_Memory);
}
private:
// Only used for testing
- void CopyOutTo(void *) const override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
- void CopyInFrom(const void*) override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
+ void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
template <typename T>
T* GetTensor() const
{
- BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+ ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<T*>(m_MutableMemory);
}
#pragma once
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
namespace armnn
case armnn::DataType::QAsymmS8:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
case DataType::QSymmS16:
return nullptr;
default:
- BOOST_ASSERT_MSG(false, "Unknown DataType.");
+ ARMNN_ASSERT_MSG(false, "Unknown DataType.");
return nullptr;
}
}
if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedInputType;
{
if (expectedOutputType != expectedInputType)
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
}
else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end())
{
- BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+ ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
+ ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == expectedOutputType;
MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<QueueDescriptor>(descriptor, info)
{
- BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
info.m_InputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == InputDataType;
}),
"Trying to create workload with incorrect type");
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == OutputDataType;
{
if (!info.m_InputTensorInfos.empty())
{
- BOOST_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
+ ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
"Trying to create workload with incorrect type");
}
- BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+ ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
info.m_OutputTensorInfos.end(),
[&](auto it){
return it.GetDataType() == DataType;
case DataType::QSymmS16:
return DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "Invalid input data type");
+ ARMNN_ASSERT_MSG(false, "Invalid input data type");
return DataType::Float32;
}
}
const TensorInfo input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const Convolution2dDescriptor& descriptor = cLayer->GetParameters();
const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
dataType);
const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
TensorInfo biasInfo;
const TensorInfo * biasInfoPtr = nullptr;
const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
biasInfoPtr = &biasInfo;
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Unexpected bias type");
+ ARMNN_ASSERT_MSG(false, "Unexpected bias type");
}
}
}
Optional<TensorInfo> biases;
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
GetBiasTypeFromWeightsType(dataType));
}
- BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+ ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
result = layerSupportObject->IsTransposeConvolution2dSupported(input,
}
default:
{
- BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
+ ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
reason.value() = "Unrecognised layer type";
result = false;
break;
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
const PermutationVector& permutationVector, void* permuteBuffer)
{
- BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
TensorInfo tensorInfo = tensor->GetTensorInfo();
DataLayout dataLayout,
void* permuteBuffer)
{
- BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
- BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+ ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
+ ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
auto multiplier = weightTensor->GetTensorInfo().GetShape()[0];
auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];
auto dstPtrChannel = dstData;
for (unsigned int w = 0; w < copyWidth; ++w)
{
- BOOST_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
- BOOST_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
+ ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
+ ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
copy(dstData, srcData, copyLength);
dstData += dstWidthStride;
srcData += srcWidthStride;
bool IsLayerSupported(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
armnn::LayerType layerType = layer->GetType();
switch (layerType)
bool IsLayerOptimizable(const armnn::Layer* layer)
{
- BOOST_ASSERT(layer != nullptr);
+ ARMNN_ASSERT(layer != nullptr);
// A Layer is not optimizable if its name contains "unoptimizable"
const std::string layerName(layer->GetName());
supportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
{
- BOOST_ASSERT(supportedSubgraph != nullptr);
+ ARMNN_ASSERT(supportedSubgraph != nullptr);
PreCompiledLayer* preCompiledLayer =
optimizationViews.GetGraph().AddLayer<PreCompiledLayer>(
unsupportedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
{
- BOOST_ASSERT(unsupportedSubgraph != nullptr);
+ ARMNN_ASSERT(unsupportedSubgraph != nullptr);
optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
});
untouchedSubgraphs.end(),
[&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
{
- BOOST_ASSERT(untouchedSubgraph != nullptr);
+ ARMNN_ASSERT(untouchedSubgraph != nullptr);
optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
});
case armnn::DataType::QSymmS16:
return armnn::DataType::Signed32;
default:
- BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
}
return armnn::EmptyOptional();
}
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
- BOOST_ASSERT(workload != nullptr);
+ ARMNN_ASSERT(workload != nullptr);
std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
- BOOST_ASSERT(workloadRef != nullptr);
+ ARMNN_ASSERT(workloadRef != nullptr);
inputHandle->Allocate();
outputHandle->Allocate();
#include "ComparisonTestImpl.hpp"
-
+#include <armnn/utility/Assert.hpp>
#include <Half.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <test/TensorHelpers.hpp>
-#include <boost/assert.hpp>
-
namespace
{
int outQuantOffset)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
- BOOST_ASSERT(shape1.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
- BOOST_ASSERT(outShape.GetNumDimensions() == NumDims);
+ ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
}
else
{
- BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
"Input shapes must have the same number of dimensions");
}
}
unsigned int & concatDim,
std::pair<PermutationVector, PermutationVector> & permutations)
{
- BOOST_ASSERT_MSG(numDimensions <= 3,
+ ARMNN_ASSERT_MSG(numDimensions <= 3,
"Only dimensions 1,2 and 3 are supported by this helper");
unsigned int expandedBy = 3 - numDimensions;
unsigned int expandedConcatAxis = concatDim + expandedBy;
}
else
{
- BOOST_ASSERT(expandedConcatAxis == 0);
+ ARMNN_ASSERT(expandedConcatAxis == 0);
concatDim = 0;
}
}
std::vector<T>& outputData)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+ ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
// Nullptr is an error in the test. By returning without doing the concatenation
TensorInfo & outputTensorInfo)
{
IgnoreUnused(memoryManager);
- BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+ ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
unsigned int numDims = 0;
// Store the reverese permutation.
permuteVector = permutations.second;
- BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+ ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
"Test logic error, we don't need permutation, so we shouldn't arrive here");
}
else
{
- BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+ ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
"All inputs must have the same number of dimensions");
}
std::unique_ptr<ITensorHandle> && inputDataHandle,
T * data)
{
- BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+ ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
if (data == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
unsigned int concatDim,
bool useSubtensor)
{
- BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+ ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
if (output == nullptr)
{
// Nullptr is an error in the test. By returning without doing the permutation
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
"Invalid type and parameter combination.");
- BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+ ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
"Invalid type and parameter combination.");
// Note we need to dequantize and re-quantize the image value and the bias.
for (uint32_t x = 0; x < w; ++x)
{
uint32_t offset = (i * h + y) * w + x;
- BOOST_ASSERT(offset < v.size());
+ ARMNN_ASSERT(offset < v.size());
T& outRef = v[offset];
float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
// If a bias is used, its size must equal the number of output channels.
bool biasEnabled = bias.size() > 0;
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
bool biasEnabled = bias.size() > 0;
// This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
- BOOST_ASSERT(inputNum == 1);
- BOOST_ASSERT(outputNum == 1);
+ ARMNN_ASSERT(inputNum == 1);
+ ARMNN_ASSERT(outputNum == 1);
// If a bias is used, its size must equal the number of output channels.
- BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+ ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Note these tensors will use two (identical) batches.
#pragma once
#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
#include <boost/multi_array.hpp>
template <std::size_t n>
boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
{
- BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+ ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
"Attempting to construct a shape array of mismatching size");
boost::array<unsigned int, n> shape;
outputHandle->Allocate();
CopyDataToITensorHandle(inputHandle.get(), input.origin());
- BOOST_ASSERT(workload);
+ ARMNN_ASSERT(workload);
ExecuteWorkload(*workload, memoryManager);
#include "ClContextControl.hpp"
#include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
#include <arm_compute/core/CL/OpenCL.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
return TuningLevel::Exhaustive;
default:
{
- BOOST_ASSERT_MSG(false, "Tuning level not recognised.");
+ ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
return TuningLevel::None;
}
}
#include <LeakChecking.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
// Removes the use of global CL context.
cl::Context::setDefault(cl::Context{});
- BOOST_ASSERT(cl::Context::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
// Removes the use of global CL command queue.
cl::CommandQueue::setDefault(cl::CommandQueue{});
- BOOST_ASSERT(cl::CommandQueue::getDefault()() == NULL);
+ ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
// Always load the OpenCL runtime.
LoadOpenClRuntime();
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
void ClDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
switch(handle->GetTensorInfo().GetDataType())
CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
#include "NeonInterceptorScheduler.hpp"
-#include <boost/assert.hpp>
-
namespace armnn{
NeonInterceptorScheduler::NeonInterceptorScheduler(arm_compute::IScheduler &realScheduler)
#include <BFloat16.hpp>
#include <Half.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
// If we have enabled Importing, don't manage the tensor
if (!m_IsImportEnabled)
{
- BOOST_ASSERT(m_MemoryGroup != nullptr);
+ ARMNN_ASSERT(m_MemoryGroup != nullptr);
m_MemoryGroup->manage(&m_Tensor);
}
}
#include "NeonTimer.hpp"
#include "NeonInterceptorScheduler.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include <memory>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
namespace armnn
void NeonTimer::Start()
{
m_Kernels.clear();
- BOOST_ASSERT(g_Interceptor->GetKernels() == nullptr);
+ ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
g_Interceptor->SetKernels(&m_Kernels);
m_RealSchedulerType = arm_compute::Scheduler::get_type();
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
arm_compute::ITensor& output =
boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
arm_compute::DataType computeDataType =
}
default:
{
- BOOST_ASSERT_MSG(false, "Unknown data type");
+ ARMNN_ASSERT_MSG(false, "Unknown data type");
break;
}
}
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
m_ConvolutionLayer.reset(convolutionLayer.release());
- BOOST_ASSERT(m_ConvolutionLayer);
+ ARMNN_ASSERT(m_ConvolutionLayer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
arm_compute::ActivationLayerInfo(),
aclDilationInfo);
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
void NeonDepthwiseConvolutionWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
- BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+ ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
m_pDepthwiseConvolutionLayer->run();
}
if (descriptor.m_BiasEnabled)
{
- BOOST_ASSERT(biases.has_value());
+ ARMNN_ASSERT(biases.has_value());
aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
optionalAclBiasesInfo = &aclBiasesInfo;
m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
- BOOST_ASSERT(m_Layer);
+ ARMNN_ASSERT(m_Layer);
InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
const ConstCpuTensorHandle* handle)
{
- BOOST_ASSERT(handle);
+ ARMNN_ASSERT(handle);
switch(handle->GetTensorInfo().GetDataType())
{
CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
}
};
"Reference concatenation: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference concatenation: input type not supported");
"Reference stack: output type not supported");
for (const TensorInfo* input : inputs)
{
- BOOST_ASSERT(input != nullptr);
+ ARMNN_ASSERT(input != nullptr);
supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
"Reference stack: input type not supported");
//
#include "RefMemoryManager.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <algorithm>
void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
{
- BOOST_ASSERT(pool);
+ ARMNN_ASSERT(pool);
m_FreePools.push_back(pool);
}
void* RefMemoryManager::Pool::GetPointer()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
return m_Pointer;
}
void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
m_Size = std::max(m_Size, numBytes);
}
void RefMemoryManager::Pool::Acquire()
{
- BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+ ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
m_Pointer = ::operator new(size_t(m_Size));
}
void RefMemoryManager::Pool::Release()
{
- BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+ ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
::operator delete(m_Pointer);
m_Pointer = nullptr;
}
void RefTensorHandle::Manage()
{
- BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
- BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+ ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+ ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
}
}
else
{
- BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+ ARMNN_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
return m_MemoryManager->GetPointer(m_Pool);
}
}
void RefTensorHandle::CopyOutTo(void* dest) const
{
const void *src = GetPointer();
- BOOST_ASSERT(src);
+ ARMNN_ASSERT(src);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
void RefTensorHandle::CopyInFrom(const void* src)
{
void *dest = GetPointer();
- BOOST_ASSERT(dest);
+ ARMNN_ASSERT(dest);
memcpy(dest, src, m_TensorInfo.GetNumBytes());
}
#pragma once
-#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
-
namespace armnn
{
TypedIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
return *this;
}
TypedIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
return *this;
}
TypedIterator& operator-=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= increment;
return *this;
}
TypedIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
IgnoreUnused(axisIndex);
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
}
// This should be called to set index for per-axis Encoder/Decoder
PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = axisIndex;
return *this;
PerAxisIterator& operator++() override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
++m_Iterator;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
PerAxisIterator& operator+=(const unsigned int increment) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator += increment;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
PerAxisIterator& operator-=(const unsigned int decrement) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator -= decrement;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
PerAxisIterator& operator[](const unsigned int index) override
{
- BOOST_ASSERT(m_Iterator);
+ ARMNN_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
return *this;
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
{
TensorShape inputShape = inputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+ ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
TensorShape outputShape = outputTensorInfo.GetShape();
- BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+ ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
const unsigned int inputBatchSize = inputShape[0];
const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
- BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
const unsigned int blockShapeHeight = blockShape[0];
const unsigned int blockShapeWidth = blockShape[1];
- BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+ ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
const unsigned int cropsTop = cropsData[0].first;
const unsigned int cropsLeft = cropsData[1].first;
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
#include "ConvImpl.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cmath>
#include <limits>
QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
{
- BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+ ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
if (multiplier == 0.0f)
{
m_Multiplier = 0;
const double q = std::frexp(multiplier, &m_RightShift);
m_RightShift = -m_RightShift;
int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
- BOOST_ASSERT(qFixed <= (1ll << 31));
+ ARMNN_ASSERT(qFixed <= (1ll << 31));
if (qFixed == (1ll << 31))
{
qFixed /= 2;
--m_RightShift;
}
- BOOST_ASSERT(m_RightShift >= 0);
- BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+ ARMNN_ASSERT(m_RightShift >= 0);
+ ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
m_Multiplier = static_cast<int32_t>(qFixed);
}
}
int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
{
- BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+ ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
int32_t mask = (1 << exponent) - 1;
int32_t remainder = x & mask;
int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <cmath>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
break;
}
}
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
using namespace armnnUtils;
unsigned int dataTypeSize)
{
const unsigned int blockSize = descriptor.m_BlockSize;
- BOOST_ASSERT(blockSize != 0u);
+ ARMNN_ASSERT(blockSize != 0u);
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int batches = inputShape[0];
const TensorInfo& outputInfo)
{
IgnoreUnused(outputInfo);
- BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+ ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
// inputDecoder.Get() dequantizes the data element from whatever
#include "DetectionPostProcess.hpp"
+#include <armnn/utility/Assert.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
// xmax
boxCorners[indexW] = xCentre + halfW;
- BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
- BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+ ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+ ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
}
unsigned int numClassesWithBg = desc.m_NumClasses + 1;
#include <armnnUtils/TensorUtils.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
}
default:
{
- BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
break;
}
}
}
default:
{
- BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+ ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
break;
}
}
#include "RefWorkloadUtils.hpp"
-#include <boost/assert.hpp>
-
namespace armnn
{
{
unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
- BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+ ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
unsigned int startOffset = indx * paramsProduct;
unsigned int endOffset = startOffset + paramsProduct;
}
}
- BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+ ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
}
} //namespace armnn
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
const unsigned int numDimensions = inputInfo.GetNumDimensions();
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
- BOOST_ASSERT_MSG(axisIsValid,
+ ARMNN_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
IgnoreUnused(axisIsValid);
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- BOOST_ASSERT(boost::numeric_cast<float>(current) <
+ ARMNN_ASSERT(boost::numeric_cast<float>(current) <
(std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
#include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <cstring>
{
const ConstantQueueDescriptor& data = this->m_Data;
- BOOST_ASSERT(data.m_LayerOutput != nullptr);
+ ARMNN_ASSERT(data.m_LayerOutput != nullptr);
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
- BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+ ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
outputInfo.GetNumBytes());
void RefFullyConnectedWorkload::PostAllocationConfigure()
{
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+ ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
m_InputShape = inputInfo.GetShape();
m_InputDecoder = MakeDecoder<float>(inputInfo);
#include <Profiling.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
- BOOST_ASSERT(decoder != nullptr);
- BOOST_ASSERT(encoder != nullptr);
+ ARMNN_ASSERT(decoder != nullptr);
+ ARMNN_ASSERT(encoder != nullptr);
LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
}
if (!m_Data.m_Parameters.m_Axis)
{
float* output = GetOutputTensorData<float>(0, m_Data);
- BOOST_ASSERT(output != nullptr);
+ ARMNN_ASSERT(output != nullptr);
unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
DataType inputDataType = inputInfo.GetDataType();
DataType outputDataType = outputInfo.GetDataType();
- BOOST_ASSERT(inputDataType == outputDataType);
+ ARMNN_ASSERT(inputDataType == outputDataType);
IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
#include "Slice.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
const TensorShape& inputShape = inputInfo.GetShape();
const unsigned int numDims = inputShape.GetNumDimensions();
- BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
- BOOST_ASSERT(descriptor.m_Size.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+ ARMNN_ASSERT(descriptor.m_Size.size() == numDims);
constexpr unsigned int maxNumDims = 4;
- BOOST_ASSERT(numDims <= maxNumDims);
+ ARMNN_ASSERT(numDims <= maxNumDims);
std::vector<unsigned int> paddedInput(4);
std::vector<unsigned int> paddedBegin(4);
unsigned int size2 = paddedSize[2];
unsigned int size3 = paddedSize[3];
- BOOST_ASSERT(begin0 + size0 <= dim0);
- BOOST_ASSERT(begin1 + size1 <= dim1);
- BOOST_ASSERT(begin2 + size2 <= dim2);
- BOOST_ASSERT(begin3 + size3 <= dim3);
+ ARMNN_ASSERT(begin0 + size0 <= dim0);
+ ARMNN_ASSERT(begin1 + size1 <= dim1);
+ ARMNN_ASSERT(begin2 + size2 <= dim2);
+ ARMNN_ASSERT(begin3 + size3 <= dim3);
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
/// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
{
- BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index greater than number of dimensions.");
- BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+ ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
"Required axis index lower than negative of the number of dimensions");
unsigned int uAxis = axis < 0 ?
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include "Splitter.hpp"
#include <cmath>
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
#include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
//Split view extents are defined by the size of (the corresponding) input tensor.
const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
- BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+ ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
// Check all dimensions to see if this element is inside the given input view.
bool insideView = true;
//We are within the view, to copy input data to the output corresponding to this view.
DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
- BOOST_ASSERT(outputData);
+ ARMNN_ASSERT(outputData);
const DataType* inputData = GetInputTensorData<DataType>(0, data);
- BOOST_ASSERT(inputData);
+ ARMNN_ASSERT(inputData);
outputData[outIndex] = inputData[index];
}
#include <ResolveType.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/numeric/conversion/cast.hpp>
#include <cstring>
void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
{
- BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+ ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
const unsigned int beginIndicesCount =
boost::numeric_cast<unsigned int>(p.m_Begin.size());
- BOOST_ASSERT(dimCount >= beginIndicesCount);
+ ARMNN_ASSERT(dimCount >= beginIndicesCount);
const unsigned int padCount = dimCount - beginIndicesCount;
p.m_Begin.resize(dimCount);
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
namespace armnn
{
, m_Data(data)
, m_DataLayout(dataLayout)
{
- BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+ ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
}
DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
m_CommandHandlerRegistry.GetFunctor(packet.GetPacketFamily(),
packet.GetPacketId(),
version.GetEncodedValue());
- BOOST_ASSERT(commandHandlerFunctor);
+ ARMNN_ASSERT(commandHandlerFunctor);
commandHandlerFunctor->operator()(packet);
}
catch (const armnn::TimeoutException&)
#include "CommandHandlerRegistry.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
#include <boost/format.hpp>
namespace armnn
uint32_t packetId,
uint32_t version)
{
- BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+ ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
CommandHandlerKey key(familyId, packetId, version);
registry[key] = functor;
void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor)
{
- BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+ ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
RegisterFunctor(functor, functor->GetFamilyId(), functor->GetPacketId(), functor->GetVersion());
}
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
// Create the category
CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
// Get the raw category pointer
const Category* categoryPtr = category.get();
- BOOST_ASSERT(categoryPtr);
+ ARMNN_ASSERT(categoryPtr);
// Register the category
m_Categories.insert(std::move(category));
// Create the device
DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the raw device pointer
const Device* devicePtr = device.get();
- BOOST_ASSERT(devicePtr);
+ ARMNN_ASSERT(devicePtr);
// Register the device
m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
// Get the counter set UID
uint16_t counterSetUid = GetNextUid();
- BOOST_ASSERT(counterSetUid == counterSetUidPeek);
+ ARMNN_ASSERT(counterSetUid == counterSetUidPeek);
// Create the counter set
CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
// Get the raw counter set pointer
const CounterSet* counterSetPtr = counterSet.get();
- BOOST_ASSERT(counterSetPtr);
+ ARMNN_ASSERT(counterSetPtr);
// Register the counter set
m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
// Get the parent category
const CategoryPtr& parentCategory = *categoryIt;
- BOOST_ASSERT(parentCategory);
+ ARMNN_ASSERT(parentCategory);
// Check that a counter with the given name is not already registered within the parent category
const std::vector<uint16_t>& parentCategoryCounters = parentCategory->m_Counters;
for (uint16_t parentCategoryCounterUid : parentCategoryCounters)
{
const Counter* parentCategoryCounter = GetCounter(parentCategoryCounterUid);
- BOOST_ASSERT(parentCategoryCounter);
+ ARMNN_ASSERT(parentCategoryCounter);
if (parentCategoryCounter->m_Name == name)
{
// Get the counter UIDs and calculate the max counter UID
std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
- BOOST_ASSERT(!counterUids.empty());
+ ARMNN_ASSERT(!counterUids.empty());
uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
// Get the counter units
unitsValue,
deviceUidValue,
counterSetUidValue);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Get the raw counter pointer
const Counter* counterPtr = counter.get();
- BOOST_ASSERT(counterPtr);
+ ARMNN_ASSERT(counterPtr);
// Process multiple counters if necessary
for (uint16_t counterUid : counterUids)
}
const Category* category = it->get();
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category;
}
}
const Device* device = it->second.get();
- BOOST_ASSERT(device);
- BOOST_ASSERT(device->m_Uid == deviceUid);
+ ARMNN_ASSERT(device);
+ ARMNN_ASSERT(device->m_Uid == deviceUid);
return device;
}
}
const CounterSet* counterSet = it->second.get();
- BOOST_ASSERT(counterSet);
- BOOST_ASSERT(counterSet->m_Uid == counterSetUid);
+ ARMNN_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet->m_Uid == counterSetUid);
return counterSet;
}
}
const Counter* counter = it->second.get();
- BOOST_ASSERT(counter);
- BOOST_ASSERT(counter->m_Uid <= counterUid);
- BOOST_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
+ ARMNN_ASSERT(counter);
+ ARMNN_ASSERT(counter->m_Uid <= counterUid);
+ ARMNN_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
return counter;
}
{
return std::find_if(m_Categories.begin(), m_Categories.end(), [&categoryName](const CategoryPtr& category)
{
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category->m_Name == categoryName;
});
{
return std::find_if(m_Devices.begin(), m_Devices.end(), [&deviceName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == deviceName;
});
{
return std::find_if(m_CounterSets.begin(), m_CounterSets.end(), [&counterSetName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == counterSetName;
});
{
return std::find_if(m_Counters.begin(), m_Counters.end(), [&counterName](const auto& pair)
{
- BOOST_ASSERT(pair.second);
- BOOST_ASSERT(pair.second->m_Uid == pair.first);
+ ARMNN_ASSERT(pair.second);
+ ARMNN_ASSERT(pair.second->m_Uid == pair.first);
return pair.second->m_Name == counterName;
});
// Get the associated device
const DevicePtr& device = deviceIt->second;
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the number of cores of the associated device
return device->m_Cores;
bool FileOnlyProfilingConnection::WritePacket(const unsigned char* buffer, uint32_t length)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
// Read Header and determine case
uint32_t outgoingHeaderAsWords[2];
try
{
// Setup the profiling connection
- BOOST_ASSERT(m_ProfilingConnectionFactory);
+ ARMNN_ASSERT(m_ProfilingConnectionFactory);
m_ProfilingConnection = m_ProfilingConnectionFactory->GetProfilingConnection(m_Options);
}
catch (const Exception& e)
// "NotConnected" state
break;
case ProfilingState::WaitingForAck:
- BOOST_ASSERT(m_ProfilingConnection);
+ ARMNN_ASSERT(m_ProfilingConnection);
// Start the command thread
m_CommandHandler.Start(*m_ProfilingConnection);
void ProfilingService::AddBackendProfilingContext(const BackendId backendId,
std::shared_ptr<armnn::profiling::IBackendProfilingContext> profilingContext)
{
- BOOST_ASSERT(profilingContext != nullptr);
+ ARMNN_ASSERT(profilingContext != nullptr);
// Register the backend counters
m_MaxGlobalCounterId = profilingContext->RegisterCounters(m_MaxGlobalCounterId);
m_BackendProfilingContexts.emplace(backendId, std::move(profilingContext));
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->load(std::memory_order::memory_order_relaxed);
}
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
counterValuePtr->store(value, std::memory_order::memory_order_relaxed);
}
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->fetch_add(value, std::memory_order::memory_order_relaxed);
}
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->fetch_sub(value, std::memory_order::memory_order_relaxed);
}
{
CheckCounterUid(counterUid);
std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
- BOOST_ASSERT(counterValuePtr);
+ ARMNN_ASSERT(counterValuePtr);
return counterValuePtr->operator++(std::memory_order::memory_order_relaxed);
}
"Network loads",
"The number of networks loaded at runtime",
std::string("networks"));
- BOOST_ASSERT(loadedNetworksCounter);
+ ARMNN_ASSERT(loadedNetworksCounter);
InitializeCounterValue(loadedNetworksCounter->m_Uid);
}
// Register a counter for the number of unloaded networks
"Network unloads",
"The number of networks unloaded at runtime",
std::string("networks"));
- BOOST_ASSERT(unloadedNetworksCounter);
+ ARMNN_ASSERT(unloadedNetworksCounter);
InitializeCounterValue(unloadedNetworksCounter->m_Uid);
}
// Register a counter for the number of registered backends
"Backends registered",
"The number of registered backends",
std::string("backends"));
- BOOST_ASSERT(registeredBackendsCounter);
+ ARMNN_ASSERT(registeredBackendsCounter);
InitializeCounterValue(registeredBackendsCounter->m_Uid);
}
// Register a counter for the number of registered backends
"Backends unregistered",
"The number of unregistered backends",
std::string("backends"));
- BOOST_ASSERT(unregisteredBackendsCounter);
+ ARMNN_ASSERT(unregisteredBackendsCounter);
InitializeCounterValue(unregisteredBackendsCounter->m_Uid);
}
// Register a counter for the number of inferences run
"Inferences run",
"The number of inferences run",
std::string("inferences"));
- BOOST_ASSERT(inferencesRunCounter);
+ ARMNN_ASSERT(inferencesRunCounter);
InitializeCounterValue(inferencesRunCounter->m_Uid);
}
}
IProfilingConnectionFactory* other,
IProfilingConnectionFactory*& backup)
{
- BOOST_ASSERT(instance.m_ProfilingConnectionFactory);
- BOOST_ASSERT(other);
+ ARMNN_ASSERT(instance.m_ProfilingConnectionFactory);
+ ARMNN_ASSERT(other);
backup = instance.m_ProfilingConnectionFactory.release();
instance.m_ProfilingConnectionFactory.reset(other);
#include <WallClockTimer.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <fstream>
#include <iostream>
void WriteBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset, const void* value, unsigned int valueSize)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteBytes(packetBuffer->GetWritableData(), offset, value, valueSize);
}
void WriteUint64(const std::unique_ptr<IPacketBuffer>& packetBuffer, unsigned int offset, uint64_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint64(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint32_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint32(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint16_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint16(packetBuffer->GetWritableData(), offset, value);
}
void WriteUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint8_t value)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
WriteUint8(packetBuffer->GetWritableData(), offset, value);
}
void WriteBytes(unsigned char* buffer, unsigned int offset, const void* value, unsigned int valueSize)
{
- BOOST_ASSERT(buffer);
- BOOST_ASSERT(value);
+ ARMNN_ASSERT(buffer);
+ ARMNN_ASSERT(value);
for (unsigned int i = 0; i < valueSize; i++, offset++)
{
void WriteUint64(unsigned char* buffer, unsigned int offset, uint64_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
void WriteUint32(unsigned char* buffer, unsigned int offset, uint32_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
void WriteUint16(unsigned char* buffer, unsigned int offset, uint16_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value & 0xFF);
buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
void WriteUint8(unsigned char* buffer, unsigned int offset, uint8_t value)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
buffer[offset] = static_cast<unsigned char>(value);
}
void ReadBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
ReadBytes(packetBuffer->GetReadableData(), offset, valueSize, outValue);
}
uint64_t ReadUint64(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint64(packetBuffer->GetReadableData(), offset);
}
uint32_t ReadUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint32(packetBuffer->GetReadableData(), offset);
}
uint16_t ReadUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint16(packetBuffer->GetReadableData(), offset);
}
uint8_t ReadUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
return ReadUint8(packetBuffer->GetReadableData(), offset);
}
void ReadBytes(const unsigned char* buffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
{
- BOOST_ASSERT(buffer);
- BOOST_ASSERT(outValue);
+ ARMNN_ASSERT(buffer);
+ ARMNN_ASSERT(outValue);
for (unsigned int i = 0; i < valueSize; i++, offset++)
{
uint64_t ReadUint64(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint64_t value = 0;
value = static_cast<uint64_t>(buffer[offset]);
uint32_t ReadUint32(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint32_t value = 0;
value = static_cast<uint32_t>(buffer[offset]);
uint16_t ReadUint16(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
uint32_t value = 0;
value = static_cast<uint32_t>(buffer[offset]);
uint8_t ReadUint8(const unsigned char* buffer, unsigned int offset)
{
- BOOST_ASSERT(buffer);
+ ARMNN_ASSERT(buffer);
return buffer[offset];
}
// Read TimelineMessageDirectoryPacket from given IPacketBuffer and offset
SwTraceMessage ReadSwTraceMessage(const unsigned char* packetBuffer, unsigned int& offset)
{
- BOOST_ASSERT(packetBuffer);
+ ARMNN_ASSERT(packetBuffer);
unsigned int uint32_t_size = sizeof(uint32_t);
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
#include <Processes.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
{
using namespace boost::numeric;
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
const std::string& categoryName = category->m_Name;
- BOOST_ASSERT(!categoryName.empty());
+ ARMNN_ASSERT(!categoryName.empty());
// Remove any duplicate counters
std::vector<uint16_t> categoryCounters;
DeviceRecord& deviceRecord,
std::string& errorMessage)
{
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
uint16_t deviceUid = device->m_Uid;
const std::string& deviceName = device->m_Name;
uint16_t deviceCores = device->m_Cores;
- BOOST_ASSERT(!deviceName.empty());
+ ARMNN_ASSERT(!deviceName.empty());
// Device record word 0:
// 16:31 [16] uid: the unique identifier for the device
CounterSetRecord& counterSetRecord,
std::string& errorMessage)
{
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
uint16_t counterSetUid = counterSet->m_Uid;
const std::string& counterSetName = counterSet->m_Name;
uint16_t counterSetCount = counterSet->m_Count;
- BOOST_ASSERT(!counterSetName.empty());
+ ARMNN_ASSERT(!counterSetName.empty());
// Counter set record word 0:
// 16:31 [16] uid: the unique identifier for the counter_set
{
using namespace boost::numeric;
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
uint16_t counterUid = counter->m_Uid;
uint16_t maxCounterUid = counter->m_MaxCounterUid;
const std::string& counterDescription = counter->m_Description;
const std::string& counterUnits = counter->m_Units;
- BOOST_ASSERT(counterClass == 0 || counterClass == 1);
- BOOST_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
- BOOST_ASSERT(counterMultiplier);
+ ARMNN_ASSERT(counterClass == 0 || counterClass == 1);
+ ARMNN_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
+ ARMNN_ASSERT(counterMultiplier);
// Utils
size_t uint32_t_size = sizeof(uint32_t);
// 0:63 [64] multiplier: internal data stream is represented as integer values, this allows scaling of
// those values as if they are fixed point numbers. Zero is not a valid value
uint32_t multiplier[2] = { 0u, 0u };
- BOOST_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
+ ARMNN_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
std::memcpy(multiplier, &counterMultiplier, sizeof(multiplier));
uint32_t eventRecordWord3 = multiplier[0];
uint32_t eventRecordWord4 = multiplier[1];
#include "armnn/profiling/ISendTimelinePacket.hpp"
#include "ProfilingUtils.hpp"
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
#include <memory>
try
{
ReserveBuffer();
- BOOST_ASSERT(m_WriteBuffer);
+ ARMNN_ASSERT(m_WriteBuffer);
unsigned int numberOfBytesWritten = 0;
// Header will be prepended to the buffer on Commit()
while ( true )
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
{
// Create the category
CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
// Get the raw category pointer
const Category* categoryPtr = category.get();
- BOOST_ASSERT(categoryPtr);
+ ARMNN_ASSERT(categoryPtr);
// Register the category
m_Categories.insert(std::move(category));
// Create the device
DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
- BOOST_ASSERT(device);
+ ARMNN_ASSERT(device);
// Get the raw device pointer
const Device* devicePtr = device.get();
- BOOST_ASSERT(devicePtr);
+ ARMNN_ASSERT(devicePtr);
// Register the device
m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
// Create the counter set
CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
- BOOST_ASSERT(counterSet);
+ ARMNN_ASSERT(counterSet);
// Get the raw counter set pointer
const CounterSet* counterSetPtr = counterSet.get();
- BOOST_ASSERT(counterSetPtr);
+ ARMNN_ASSERT(counterSetPtr);
// Register the counter set
m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
// Get the counter UIDs and calculate the max counter UID
std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
- BOOST_ASSERT(!counterUids.empty());
+ ARMNN_ASSERT(!counterUids.empty());
uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
// Get the counter units
unitsValue,
deviceUidValue,
counterSetUidValue);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Get the raw counter pointer
const Counter* counterPtr = counter.get();
- BOOST_ASSERT(counterPtr);
+ ARMNN_ASSERT(counterPtr);
// Process multiple counters if necessary
for (uint16_t counterUid : counterUids)
{
// Connect the counter to the parent category
Category* parentCategory = const_cast<Category*>(GetCategory(parentCategoryName));
- BOOST_ASSERT(parentCategory);
+ ARMNN_ASSERT(parentCategory);
parentCategory->m_Counters.push_back(counterUid);
// Register the counter
{
auto it = std::find_if(m_Categories.begin(), m_Categories.end(), [&name](const CategoryPtr& category)
{
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
return category->m_Name == name;
});
unsigned int& offset,
uint32_t packetDataLength)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
uint32_t relationshipTypeUint = 0;
switch (relationshipType)
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
const unsigned char* readableData,
unsigned int& offset)
{
- BOOST_ASSERT(readableData);
+ ARMNN_ASSERT(readableData);
// Utils
unsigned int uint32_t_size = sizeof(uint32_t);
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
"",
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
counterUnits,
deviceUid,
counterSetUid);
- BOOST_ASSERT(counter);
+ ARMNN_ASSERT(counter);
// Create an event record
SendCounterPacket::EventRecord eventRecord;
// Create a category for testing
const std::string categoryName = "some_category";
const CategoryPtr category = std::make_unique<Category>(categoryName);
- BOOST_ASSERT(category);
+ ARMNN_ASSERT(category);
category->m_Counters = { 11u, 23u, 5670u };
// Create a collection of counters
Counter* counter1 = counters.find(11)->second.get();
Counter* counter2 = counters.find(23)->second.get();
Counter* counter3 = counters.find(5670)->second.get();
- BOOST_ASSERT(counter1);
- BOOST_ASSERT(counter2);
- BOOST_ASSERT(counter3);
+ ARMNN_ASSERT(counter1);
+ ARMNN_ASSERT(counter2);
+ ARMNN_ASSERT(counter3);
uint16_t categoryEventCount = boost::numeric_cast<uint16_t>(counters.size());
// Create a category record
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
#include "CaffePreprocessor.hpp"
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <iostream>
#include "InferenceTest.hpp"
#include "DeepSpeechV1Database.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
{
armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
- BOOST_ASSERT(output1.size() == k_OutputSize1);
+ ARMNN_ASSERT(output1.size() == k_OutputSize1);
const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c
- BOOST_ASSERT(output2.size() == k_OutputSize2);
+ ARMNN_ASSERT(output2.size() == k_OutputSize2);
const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h
- BOOST_ASSERT(output3.size() == k_OutputSize3);
+ ARMNN_ASSERT(output3.size() == k_OutputSize3);
// Check each output to see whether it is the expected value
for (unsigned int j = 0u; j < output1.size(); j++)
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
return EXIT_FAILURE;
}
#include <armnnUtils/Permute.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <iostream>
#include <armnn/ArmNN.hpp>
#include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
std::vector<armnn::BindingPointInfo>& outputBindings)
{
auto parser(IParser::Create());
- BOOST_ASSERT(parser);
+ ARMNN_ASSERT(parser);
armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
//
#include "InferenceTest.hpp"
+#include <armnn/utility/Assert.hpp>
+
#include "../src/armnn/Profiling.hpp"
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return false;
}
success = false;
break;
default:
- BOOST_ASSERT_MSG(false, "Unexpected TestCaseResult");
+ ARMNN_ASSERT_MSG(false, "Unexpected TestCaseResult");
return false;
}
}
//
#include "InferenceTest.hpp"
+#include <armnn/utility/Assert.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem/operations.hpp>
void operator()(const std::vector<int>& values)
{
IgnoreUnused(values);
- BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
+ ARMNN_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
ResultMap& GetResultMap() { return m_ResultMap; }
const armnn::TensorShape* inputTensorShape)
{
- BOOST_ASSERT(modelFilename);
- BOOST_ASSERT(inputBindingName);
- BOOST_ASSERT(outputBindingName);
+ ARMNN_ASSERT(modelFilename);
+ ARMNN_ASSERT(inputBindingName);
+ ARMNN_ASSERT(outputBindingName);
return InferenceTestMain(argc, argv, defaultTestCaseIds,
[=]
//
#include "InferenceTestImage.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
const unsigned int pixelOffset = x * GetNumChannels() + y * GetWidth() * GetNumChannels();
const uint8_t* const pixelData = m_Data.data() + pixelOffset;
- BOOST_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
+ ARMNN_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
std::array<uint8_t, 3> outPixelData;
outPixelData.fill(0);
#include <armnn/Logging.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
+
#include <fstream>
#include <vector>
#include "InferenceTest.hpp"
#include "MobileNetSsdDatabase.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
- BOOST_ASSERT(output1.size() == k_OutputSize1);
+ ARMNN_ASSERT(output1.size() == k_OutputSize1);
const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes
- BOOST_ASSERT(output2.size() == k_OutputSize2);
+ ARMNN_ASSERT(output2.size() == k_OutputSize2);
const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores
- BOOST_ASSERT(output3.size() == k_OutputSize3);
+ ARMNN_ASSERT(output3.size() == k_OutputSize3);
const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections
- BOOST_ASSERT(output4.size() == k_OutputSize4);
+ ARMNN_ASSERT(output4.size() == k_OutputSize4);
const size_t numDetections = boost::numeric_cast<size_t>(output4[0]);
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return 1;
}
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
std::cerr << "Fatal internal error: " << e.what() << std::endl;
return 1;
}
// Coverity points out that default_value(...) can throw a bad_lexical_cast,
// and that desc.add_options() can throw boost::io::too_few_args.
// They really won't in any of these cases.
- BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+ ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
return EXIT_FAILURE;
}
#include <tuple>
#include <utility>
-#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include "InferenceTest.hpp"
#include "YoloDatabase.hpp"
+#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <algorithm>
#include <array>
#include <utility>
-#include <boost/assert.hpp>
#include <boost/multi_array.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
using Boost3dArray = boost::multi_array<float, 3>;
const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);
- BOOST_ASSERT(output.size() == YoloOutputSize);
+ ARMNN_ASSERT(output.size() == YoloOutputSize);
constexpr Boost3dArray::index gridSize = 7;
constexpr Boost3dArray::index numClasses = 20;
}
}
}
- BOOST_ASSERT(output.data() + YoloOutputSize == outputPtr);
+ ARMNN_ASSERT(output.data() + YoloOutputSize == outputPtr);
std::vector<YoloDetectedObject> detectedObjects;
detectedObjects.reserve(gridSize * gridSize * numScales * numClasses);
profiling::CommandHandlerFunctor* commandHandlerFunctor =
m_HandlerRegistry.GetFunctor(packetRx.GetPacketFamily(), packetRx.GetPacketId(), version.GetEncodedValue());
- BOOST_ASSERT(commandHandlerFunctor);
+ ARMNN_ASSERT(commandHandlerFunctor);
commandHandlerFunctor->operator()(packetRx);
return packetRx;
}
commandHandler(packet1);
commandHandler(packet2);
- BOOST_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
+ ARMNN_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
for (size_t i = 0; i < commandHandler.m_CounterCaptureValues.m_Uids.size(); ++i)
{
- BOOST_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
+ ARMNN_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
}
}