IVGCVSW-4485 Remove Boost assert
author: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Wed, 1 Apr 2020 15:51:23 +0000 (16:51 +0100)
committer: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Mon, 6 Apr 2020 08:06:01 +0000 (09:06 +0100)
 * Change boost assert to armnn assert
 * Change include file to armnn assert
 * Fix ARMNN_ASSERT_MSG issue with multiple conditions
 * Change BOOST_ASSERT to BOOST_TEST where appropriate
 * Remove unused include statements

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff

194 files changed:
include/armnn/utility/Assert.hpp
include/armnnUtils/DataLayoutIndexed.hpp
include/armnnUtils/TensorUtils.hpp
src/armnn/Descriptors.cpp
src/armnn/Graph.cpp
src/armnn/Graph.hpp
src/armnn/InternalTypes.cpp
src/armnn/Layer.cpp
src/armnn/LayerSupport.cpp
src/armnn/LoadedNetwork.cpp
src/armnn/Logging.cpp
src/armnn/Network.cpp
src/armnn/NetworkQuantizerUtils.cpp
src/armnn/NetworkQuantizerUtils.hpp
src/armnn/NetworkUtils.cpp
src/armnn/Optimizer.cpp
src/armnn/OutputHandler.cpp
src/armnn/OutputHandler.hpp
src/armnn/OverrideInputRangeVisitor.cpp
src/armnn/Profiling.cpp
src/armnn/QuantizerVisitor.cpp
src/armnn/Runtime.cpp
src/armnn/SubgraphView.cpp
src/armnn/SubgraphViewSelector.cpp
src/armnn/Tensor.cpp
src/armnn/TypesUtils.cpp
src/armnn/layers/AbsLayer.cpp
src/armnn/layers/ActivationLayer.cpp
src/armnn/layers/ArgMinMaxLayer.cpp
src/armnn/layers/BatchNormalizationLayer.cpp
src/armnn/layers/BatchToSpaceNdLayer.cpp
src/armnn/layers/ComparisonLayer.cpp
src/armnn/layers/ConcatLayer.cpp
src/armnn/layers/ConvertBf16ToFp32Layer.cpp
src/armnn/layers/ConvertFp16ToFp32Layer.cpp
src/armnn/layers/ConvertFp32ToBf16Layer.cpp
src/armnn/layers/ConvertFp32ToFp16Layer.cpp
src/armnn/layers/Convolution2dLayer.cpp
src/armnn/layers/DebugLayer.cpp
src/armnn/layers/DepthToSpaceLayer.cpp
src/armnn/layers/DepthwiseConvolution2dLayer.cpp
src/armnn/layers/DequantizeLayer.cpp
src/armnn/layers/DetectionPostProcessLayer.cpp
src/armnn/layers/ElementwiseBaseLayer.cpp
src/armnn/layers/ElementwiseUnaryLayer.cpp
src/armnn/layers/FakeQuantizationLayer.cpp
src/armnn/layers/FloorLayer.cpp
src/armnn/layers/FullyConnectedLayer.cpp
src/armnn/layers/InstanceNormalizationLayer.cpp
src/armnn/layers/L2NormalizationLayer.cpp
src/armnn/layers/LogSoftmaxLayer.cpp
src/armnn/layers/LstmLayer.cpp
src/armnn/layers/MeanLayer.cpp
src/armnn/layers/MemCopyLayer.cpp
src/armnn/layers/MemImportLayer.cpp
src/armnn/layers/MergeLayer.cpp
src/armnn/layers/NormalizationLayer.cpp
src/armnn/layers/PermuteLayer.cpp
src/armnn/layers/Pooling2dLayer.cpp
src/armnn/layers/PreluLayer.cpp
src/armnn/layers/QLstmLayer.cpp
src/armnn/layers/QuantizedLstmLayer.cpp
src/armnn/layers/ReshapeLayer.cpp
src/armnn/layers/ResizeLayer.cpp
src/armnn/layers/RsqrtLayer.cpp
src/armnn/layers/SliceLayer.cpp
src/armnn/layers/SoftmaxLayer.cpp
src/armnn/layers/SpaceToBatchNdLayer.cpp
src/armnn/layers/SpaceToDepthLayer.cpp
src/armnn/layers/SplitterLayer.cpp
src/armnn/layers/StackLayer.cpp
src/armnn/layers/StridedSliceLayer.cpp
src/armnn/layers/SwitchLayer.cpp
src/armnn/layers/TransposeConvolution2dLayer.cpp
src/armnn/layers/TransposeLayer.cpp
src/armnn/optimizations/FoldPadIntoConvolution2d.hpp
src/armnn/optimizations/OptimizeConsecutiveReshapes.hpp
src/armnn/optimizations/OptimizeInverseConversions.hpp
src/armnn/optimizations/PermuteAndBatchToSpaceAsDepthToSpace.hpp
src/armnn/test/OptimizerTests.cpp
src/armnn/test/QuantizerTest.cpp
src/armnn/test/TensorHelpers.hpp
src/armnn/test/TestUtils.cpp
src/armnnCaffeParser/CaffeParser.cpp
src/armnnDeserializer/Deserializer.cpp
src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
src/armnnOnnxParser/OnnxParser.cpp
src/armnnTfLiteParser/TfLiteParser.cpp
src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
src/armnnTfLiteParser/test/Unsupported.cpp
src/armnnTfParser/TfParser.cpp
src/armnnTfParser/test/AddN.cpp
src/armnnTfParser/test/Convolution2d.cpp
src/armnnUtils/DotSerializer.cpp
src/armnnUtils/FloatingPointConverter.cpp
src/armnnUtils/GraphTopologicalSort.hpp
src/armnnUtils/ModelAccuracyChecker.cpp
src/armnnUtils/ModelAccuracyChecker.hpp
src/armnnUtils/TensorUtils.cpp
src/armnnUtils/test/ParserHelperTest.cpp
src/armnnUtils/test/PrototxtConversionsTest.cpp
src/backends/aclCommon/ArmComputeTensorUtils.cpp
src/backends/aclCommon/ArmComputeUtils.hpp
src/backends/aclCommon/BaseMemoryManager.cpp
src/backends/backendsCommon/CpuTensorHandle.cpp
src/backends/backendsCommon/CpuTensorHandle.hpp
src/backends/backendsCommon/LayerSupportRules.hpp
src/backends/backendsCommon/MakeWorkloadHelper.hpp
src/backends/backendsCommon/Workload.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadUtils.cpp
src/backends/backendsCommon/WorkloadUtils.hpp
src/backends/backendsCommon/test/MockBackend.cpp
src/backends/backendsCommon/test/WorkloadTestUtils.hpp
src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
src/backends/cl/ClBackendContext.cpp
src/backends/cl/ClContextControl.cpp
src/backends/cl/workloads/ClConstantWorkload.cpp
src/backends/cl/workloads/ClConvolution2dWorkload.cpp
src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
src/backends/cl/workloads/ClWorkloadUtils.hpp
src/backends/neon/NeonInterceptorScheduler.cpp
src/backends/neon/NeonTensorHandle.hpp
src/backends/neon/NeonTimer.cpp
src/backends/neon/workloads/NeonConstantWorkload.cpp
src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
src/backends/neon/workloads/NeonWorkloadUtils.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefMemoryManager.cpp
src/backends/reference/RefTensorHandle.cpp
src/backends/reference/workloads/BaseIterator.hpp
src/backends/reference/workloads/BatchToSpaceNd.cpp
src/backends/reference/workloads/Concatenate.cpp
src/backends/reference/workloads/ConvImpl.cpp
src/backends/reference/workloads/ConvImpl.hpp
src/backends/reference/workloads/Decoders.hpp
src/backends/reference/workloads/DepthToSpace.cpp
src/backends/reference/workloads/Dequantize.cpp
src/backends/reference/workloads/DetectionPostProcess.cpp
src/backends/reference/workloads/Encoders.hpp
src/backends/reference/workloads/FullyConnected.cpp
src/backends/reference/workloads/Gather.cpp
src/backends/reference/workloads/LogSoftmax.cpp
src/backends/reference/workloads/Mean.cpp
src/backends/reference/workloads/RefConstantWorkload.cpp
src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
src/backends/reference/workloads/RefStackWorkload.cpp
src/backends/reference/workloads/RefStridedSliceWorkload.cpp
src/backends/reference/workloads/Slice.cpp
src/backends/reference/workloads/Softmax.cpp
src/backends/reference/workloads/Splitter.cpp
src/backends/reference/workloads/Splitter.hpp
src/backends/reference/workloads/StridedSlice.cpp
src/backends/reference/workloads/TensorBufferArrayView.hpp
src/profiling/CommandHandler.cpp
src/profiling/CommandHandlerRegistry.cpp
src/profiling/CounterDirectory.cpp
src/profiling/FileOnlyProfilingConnection.cpp
src/profiling/ProfilingService.cpp
src/profiling/ProfilingService.hpp
src/profiling/ProfilingUtils.cpp
src/profiling/SendCounterPacket.cpp
src/profiling/SendTimelinePacket.hpp
src/profiling/test/ProfilingMocks.hpp
src/profiling/test/ProfilingTestUtils.cpp
src/profiling/test/SendCounterPacketTests.cpp
src/profiling/test/SendCounterPacketTests.hpp
tests/CaffePreprocessor.cpp
tests/DeepSpeechV1InferenceTest.hpp
tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/ImagePreprocessor.cpp
tests/InferenceModel.hpp
tests/InferenceTest.cpp
tests/InferenceTest.inl
tests/InferenceTestImage.cpp
tests/MnistDatabase.cpp
tests/MobileNetSsdInferenceTest.hpp
tests/ModelAccuracyTool-Armnn/ModelAccuracyTool-Armnn.cpp
tests/MultipleNetworksCifar10/MultipleNetworksCifar10.cpp
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
tests/YoloDatabase.cpp
tests/YoloInferenceTest.hpp
tests/profiling/gatordmock/GatordMockService.cpp
tests/profiling/gatordmock/tests/GatordMockTests.cpp

index 4d2f47b..455775f 100644 (file)
@@ -12,7 +12,7 @@ namespace armnn
 
 #ifndef NDEBUG
 #   define ARMNN_ASSERT(COND) assert(COND)
-#   define ARMNN_ASSERT_MSG(COND, MSG) assert(COND && MSG)
+#   define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && MSG)
 #else
 #   define ARMNN_ASSERT(COND)
 #   define ARMNN_ASSERT_MSG(COND, MSG)
index c6701f7..e377cc5 100644 (file)
@@ -8,7 +8,7 @@
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnnUtils
 {
@@ -28,12 +28,12 @@ public:
                                  unsigned int batchIndex, unsigned int channelIndex,
                                  unsigned int heightIndex, unsigned int widthIndex) const
     {
-        BOOST_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
-        BOOST_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
+        ARMNN_ASSERT( batchIndex < shape[0] || ( shape[0] == 0 && batchIndex == 0 ) );
+        ARMNN_ASSERT( channelIndex < shape[m_ChannelsIndex] ||
                     ( shape[m_ChannelsIndex] == 0 && channelIndex == 0) );
-        BOOST_ASSERT( heightIndex < shape[m_HeightIndex] ||
+        ARMNN_ASSERT( heightIndex < shape[m_HeightIndex] ||
                     ( shape[m_HeightIndex] == 0 && heightIndex == 0) );
-        BOOST_ASSERT( widthIndex < shape[m_WidthIndex] ||
+        ARMNN_ASSERT( widthIndex < shape[m_WidthIndex] ||
                     ( shape[m_WidthIndex] == 0 && widthIndex == 0) );
 
         /// Offset the given indices appropriately depending on the data layout
index fbfb8f4..cc5f780 100644 (file)
@@ -7,8 +7,6 @@
 
 #include <armnn/TypesUtils.hpp>
 
-#include <boost/assert.hpp>
-
 namespace armnnUtils
 {
 armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
index 95f9b5d..8f4df79 100644 (file)
@@ -5,6 +5,8 @@
 #include "armnn/Descriptors.hpp"
 #include "armnn/Logging.hpp"
 
+#include <armnn/utility/Assert.hpp>
+
 #include <algorithm>
 #include <array>
 #include <vector>
@@ -195,7 +197,7 @@ const uint32_t* OriginsDescriptor::GetViewOrigin(uint32_t idx) const
 // Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
 void OriginsDescriptor::ReorderOrigins(unsigned int*  newOrdering, unsigned int numNewOrdering)
 {
-    BOOST_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
+    ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
         "elements in the new ordering array");
     std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);
 
index 0d326ad..78b08ec 100644 (file)
@@ -13,9 +13,9 @@
 #include <armnn/Logging.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 #include <unordered_map>
@@ -142,7 +142,7 @@ Status Graph::SerializeToDot(std::ostream& stream)
 Status Graph::AllocateDynamicBuffers()
 {
     // Layers must be sorted in topological order
-    BOOST_ASSERT(m_LayersInOrder);
+    ARMNN_ASSERT(m_LayersInOrder);
 
     std::unordered_set<const ITensorHandle*> preallocatedTensors;
     std::unordered_map<const ITensorHandle*, unsigned int> handleReferenceCounts;
@@ -268,7 +268,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
     auto MayNeedCompatibilityLayer = [](const Layer& layer)
     {
         // All layers should have been associated with a valid compute device at this point.
-        BOOST_ASSERT(layer.GetBackendId() != Compute::Undefined);
+        ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
         // Does not need another compatibility layer if a copy or import layer is already present.
         return layer.GetType() != LayerType::MemCopy &&
                layer.GetType() != LayerType::MemImport;
@@ -282,7 +282,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
 
     ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
     {
-        BOOST_ASSERT(srcLayer);
+        ARMNN_ASSERT(srcLayer);
 
         if (!MayNeedCompatibilityLayer(*srcLayer))
         {
@@ -299,10 +299,10 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
             for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
             {
                 InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
-                BOOST_ASSERT(dstInputSlot);
+                ARMNN_ASSERT(dstInputSlot);
 
                 EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
-                BOOST_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
+                ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
                                  "Undefined memory strategy found while adding copy layers for compatibility");
 
                 const Layer& dstLayer = dstInputSlot->GetOwningLayer();
@@ -325,7 +325,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
                     }
                     else
                     {
-                        BOOST_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
+                        ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
                         compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
                     }
 
@@ -395,7 +395,7 @@ void Graph::AddCompatibilityLayers(std::map<BackendId, std::unique_ptr<IBackendI
 
 void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
 {
-    BOOST_ASSERT(substituteLayer != nullptr);
+    ARMNN_ASSERT(substituteLayer != nullptr);
 
     ReplaceSubgraphConnections(subgraph, substituteLayer);
     EraseSubgraphLayers(subgraph);
@@ -420,7 +420,7 @@ void Graph::SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& subst
 
 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectableLayer* substituteLayer)
 {
-    BOOST_ASSERT(substituteLayer != nullptr);
+    ARMNN_ASSERT(substituteLayer != nullptr);
 
     // Create a new sub-graph with only the given layer, using
     // the given sub-graph as a reference of which parent graph to use
@@ -430,13 +430,13 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, IConnectabl
 
 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
 {
-    BOOST_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
+    ARMNN_ASSERT_MSG(!substituteSubgraph.GetLayers().empty(), "New sub-graph used for substitution must not be empty");
 
     const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
     std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
     {
         IgnoreUnused(layer);
-        BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
+        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
                          "Substitute layer is not a member of graph");
     });
 
@@ -449,8 +449,8 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
     const SubgraphView::InputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetInputSlots();
     const SubgraphView::OutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetOutputSlots();
 
-    BOOST_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
-    BOOST_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
+    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
+    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
 
     // Disconnect the sub-graph and replace it with the substitute sub-graph
 
@@ -458,14 +458,14 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
     for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
     {
         InputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
-        BOOST_ASSERT(subgraphInputSlot);
+        ARMNN_ASSERT(subgraphInputSlot);
 
         IOutputSlot* connectedOutputSlot = subgraphInputSlot->GetConnection();
-        BOOST_ASSERT(connectedOutputSlot);
+        ARMNN_ASSERT(connectedOutputSlot);
         connectedOutputSlot->Disconnect(*subgraphInputSlot);
 
         IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
-        BOOST_ASSERT(substituteInputSlot);
+        ARMNN_ASSERT(substituteInputSlot);
         connectedOutputSlot->Connect(*substituteInputSlot);
     }
 
@@ -473,10 +473,10 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
     for(unsigned int outputSlotIdx = 0; outputSlotIdx < subgraphNumOutputSlots; ++outputSlotIdx)
     {
         OutputSlot* subgraphOutputSlot = subgraphOutputSlots.at(outputSlotIdx);
-        BOOST_ASSERT(subgraphOutputSlot);
+        ARMNN_ASSERT(subgraphOutputSlot);
 
         OutputSlot* substituteOutputSlot = substituteSubgraphOutputSlots.at(outputSlotIdx);
-        BOOST_ASSERT(substituteOutputSlot);
+        ARMNN_ASSERT(substituteOutputSlot);
         subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
     }
 }
index 63bc8d0..00ab8de 100644 (file)
@@ -11,6 +11,7 @@
 #include <armnn/TensorFwd.hpp>
 #include <armnn/NetworkFwd.hpp>
 #include <armnn/Exceptions.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <list>
 #include <map>
@@ -18,7 +19,6 @@
 #include <unordered_set>
 #include <vector>
 
-#include <boost/assert.hpp>
 #include <boost/iterator/transform_iterator.hpp>
 
 namespace armnn
@@ -115,8 +115,8 @@ public:
             otherLayer->Reparent(*this, m_Layers.end());
         });
 
-        BOOST_ASSERT(other.m_PosInGraphMap.empty());
-        BOOST_ASSERT(other.m_Layers.empty());
+        ARMNN_ASSERT(other.m_PosInGraphMap.empty());
+        ARMNN_ASSERT(other.m_Layers.empty());
 
         return *this;
     }
@@ -298,7 +298,7 @@ private:
 
         const size_t numErased = graph.m_PosInGraphMap.erase(this);
         IgnoreUnused(numErased);
-        BOOST_ASSERT(numErased == 1);
+        ARMNN_ASSERT(numErased == 1);
     }
 
 protected:
@@ -356,7 +356,7 @@ public:
     {
         const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
         IgnoreUnused(numErased);
-        BOOST_ASSERT(numErased == 1);
+        ARMNN_ASSERT(numErased == 1);
     }
 };
 
@@ -382,14 +382,14 @@ public:
     {
         const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
         IgnoreUnused(numErased);
-        BOOST_ASSERT(numErased == 1);
+        ARMNN_ASSERT(numErased == 1);
     }
 };
 
 inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
 {
     auto it = m_PosInGraphMap.find(&layer);
-    BOOST_ASSERT(it != m_PosInGraphMap.end());
+    ARMNN_ASSERT(it != m_PosInGraphMap.end());
     return it->second;
 }
 
@@ -429,7 +429,7 @@ inline LayerT* Graph::InsertNewLayer(OutputSlot& insertAfter, Args&&... args)
     const Iterator pos = std::next(GetPosInGraph(owningLayer));
     LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
 
-    BOOST_ASSERT(layer->GetNumInputSlots() == 1);
+    ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
 
     insertAfter.MoveAllConnections(layer->GetOutputSlot());
     insertAfter.Connect(layer->GetInputSlot(0));
@@ -449,7 +449,7 @@ inline void Graph::EraseLayer(Iterator pos)
 template <typename LayerT>
 inline void Graph::EraseLayer(LayerT*& layer)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     EraseLayer(GetPosInGraph(*layer));
     layer = nullptr;
 }
index 2fe38fc..a9435b2 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "InternalTypes.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -75,7 +75,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::TransposeConvolution2d: return "TransposeConvolution2d";
         case LayerType::Transpose: return "Transpose";
         default:
-            BOOST_ASSERT_MSG(false, "Unknown layer type");
+            ARMNN_ASSERT_MSG(false, "Unknown layer type");
             return "Unknown";
     }
 }
index 29d85b5..024a188 100644 (file)
@@ -19,7 +19,7 @@ namespace armnn
 
 void InputSlot::Insert(Layer& layer)
 {
-    BOOST_ASSERT(layer.GetNumOutputSlots() == 1);
+    ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
 
     OutputSlot* const prevSlot = GetConnectedOutputSlot();
 
@@ -29,7 +29,7 @@ void InputSlot::Insert(Layer& layer)
         prevSlot->Disconnect(*this);
 
         // Connects inserted layer to parent.
-        BOOST_ASSERT(layer.GetNumInputSlots() == 1);
+        ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
         int idx = prevSlot->Connect(layer.GetInputSlot(0));
         prevSlot->SetEdgeStrategy(boost::numeric_cast<unsigned int>(idx), EdgeStrategy::Undefined);
 
@@ -72,7 +72,7 @@ bool OutputSlot::IsTensorInfoSet() const
 
 bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
 {
-    BOOST_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
+    ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
     return shape == m_OutputHandler.GetTensorInfo().GetShape();
 }
 
@@ -113,7 +113,7 @@ void OutputSlot::MoveAllConnections(OutputSlot& destination)
 {
     while (GetNumConnections() > 0)
     {
-        BOOST_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
+        ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
             "Cannot move connections once memory strategies have be established.");
 
         InputSlot& connection = *GetConnection(0);
@@ -131,7 +131,7 @@ unsigned int OutputSlot::CalculateIndexOnOwner() const
             return i;
         }
     }
-    BOOST_ASSERT_MSG(false, "Did not find slot on owner.");
+    ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
     return 0; // Error
 }
 
@@ -223,7 +223,7 @@ void Layer::CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const
     for (auto&& inputSlot : GetInputSlots())
     {
         // The graph must be well-formed at this point.
-        BOOST_ASSERT(inputSlot.GetConnection());
+        ARMNN_ASSERT(inputSlot.GetConnection());
         const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
         dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
     }
@@ -255,7 +255,7 @@ void Layer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
         else
         {
             ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
-            BOOST_ASSERT(handleFactory);
+            ARMNN_ASSERT(handleFactory);
             handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
         }
     }
@@ -337,7 +337,7 @@ LayerPriority Layer::GetPriority() const
 
 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
 {
-    BOOST_ASSERT(GetNumInputSlots() == expectedConnections);
+    ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
 
     for (unsigned int i=0; i<expectedConnections; ++i)
     {
@@ -370,8 +370,8 @@ void Layer::VerifyLayerConnections(unsigned int expectedConnections, const Check
 
 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(GetNumInputSlots() != 0);
-    BOOST_ASSERT(GetNumOutputSlots() != 0);
+    ARMNN_ASSERT(GetNumInputSlots() != 0);
+    ARMNN_ASSERT(GetNumOutputSlots() != 0);
 
     // By default we return what we got, meaning the output shape(s) are the same as the input(s).
     // This only works if the number of inputs and outputs are the same. Since we are in the Layer
index 73e54b3..fe5b542 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <armnn/backends/IBackendInternal.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <cstring>
 #include <algorithm>
@@ -144,7 +144,7 @@ bool IsConcatSupported(const BackendId& backend,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
 {
-    BOOST_ASSERT(inputs.size() > 0);
+    ARMNN_ASSERT(inputs.size() > 0);
 
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsConcatSupported, inputs, output, descriptor);
 }
@@ -418,7 +418,7 @@ bool IsMergerSupported(const BackendId& backend,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
 {
-    BOOST_ASSERT(inputs.size() > 0);
+    ARMNN_ASSERT(inputs.size() > 0);
 
     ARMNN_NO_DEPRECATE_WARN_BEGIN
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMergerSupported, inputs, output, descriptor);
index 9d181e5..9da988b 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
@@ -22,7 +23,6 @@
 #include <LabelsAndEventClasses.hpp>
 
 #include <boost/polymorphic_cast.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 namespace armnn
@@ -55,7 +55,7 @@ void AddLayerStructure(std::unique_ptr<TimelineUtilityMethods>& timelineUtils,
     for (auto&& input : layer.GetInputSlots())
     {
         const IOutputSlot* source = input.GetConnectedOutputSlot();
-        BOOST_ASSERT(source != NULL);
+        ARMNN_ASSERT(source != NULL);
         timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
                                                     source->GetOwningLayerGuid(),
                                                     layer.GetGuid());
@@ -304,7 +304,7 @@ TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
     {
-        BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
         if (inputLayer->GetBindingId() == layerId)
         {
             return inputLayer->GetOutputSlot(0).GetTensorInfo();
@@ -318,8 +318,8 @@ TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
 {
     for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
     {
-        BOOST_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
-        BOOST_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
+        ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
+        ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
         if (outputLayer->GetBindingId() == layerId)
         {
             return outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -346,10 +346,10 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
 
     workloadFactory = it->second.first.get();
 
-    BOOST_ASSERT_MSG(workloadFactory, "No workload factory");
+    ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
 
     std::string reasonIfUnsupported;
-    BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
+    ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
         "Factory does not support layer");
     IgnoreUnused(reasonIfUnsupported);
     return *workloadFactory;
@@ -540,11 +540,11 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
     inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
     info.m_InputTensorInfos.push_back(tensorInfo);
 
-    BOOST_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
+    ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
     const OutputHandler& handler = layer.GetOutputHandler();
     const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
     ITensorHandle* outputTensorHandle = handler.GetData();
-    BOOST_ASSERT_MSG(outputTensorHandle != nullptr,
+    ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
                      "Data should have been allocated.");
     inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
     info.m_OutputTensorInfos.push_back(outputTensorInfo);
@@ -574,7 +574,7 @@ void LoadedNetwork::EnqueueInput(const BindableLayer& layer, ITensorHandle* tens
         // Create a mem copy workload for input since we did not import
         std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
 
-        BOOST_ASSERT_MSG(inputWorkload, "No input workload created");
+        ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
 
         std::unique_ptr<TimelineUtilityMethods> timelineUtils =
                             TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
@@ -607,14 +607,14 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
     outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
     info.m_OutputTensorInfos.push_back(tensorInfo);
 
-    BOOST_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
+    ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
 
     // Gets the output handler from the previous node.
     const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
 
     const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
     ITensorHandle* inputTensorHandle = outputHandler.GetData();
-    BOOST_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+    ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
 
     // Try import the output tensor.
     // Note: We can only import the output pointer if all of the following  hold true:
@@ -641,7 +641,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
                     syncDesc.m_Inputs.push_back(inputTensorHandle);
                     info.m_InputTensorInfos.push_back(inputTensorInfo);
                     auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
-                    BOOST_ASSERT_MSG(syncWorkload, "No sync workload created");
+                    ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
                     m_OutputQueue.push_back(move(syncWorkload));
                 }
                 else
@@ -667,7 +667,7 @@ void LoadedNetwork::EnqueueOutput(const BindableLayer& layer, ITensorHandle* ten
 
         std::unique_ptr<IWorkload> outputWorkload =
             std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
-        BOOST_ASSERT_MSG(outputWorkload, "No output workload created");
+        ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
 
         std::unique_ptr<TimelineUtilityMethods> timelineUtils =
                                 TimelineUtilityMethods::GetTimelineUtils(m_ProfilingService);
index ba40123..a3ca7ce 100644 (file)
@@ -6,6 +6,7 @@
 #include <armnn/Logging.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #if defined(_MSC_VER)
 #ifndef NOMINMAX
@@ -19,7 +20,6 @@
 #include <android/log.h>
 #endif
 
-#include <boost/assert.hpp>
 #include <iostream>
 
 namespace armnn
@@ -54,7 +54,7 @@ void SetLogFilter(LogSeverity level)
             SimpleLogger<LogSeverity::Fatal>::Get().Enable(true);
             break;
         default:
-            BOOST_ASSERT(false);
+            ARMNN_ASSERT(false);
     }
 }
 
index a443721..ac5159a 100644 (file)
@@ -22,6 +22,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <ProfilingService.hpp>
@@ -33,7 +34,6 @@
 #include <vector>
 #include <algorithm>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/converter_policies.hpp>
 #include <boost/cast.hpp>
@@ -473,7 +473,7 @@ OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
                 }
                 else
                 {
-                    BOOST_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
+                    ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
                 }
             }
         }
@@ -527,7 +527,7 @@ BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRe
     {
         auto backendFactory = backendRegistry.GetFactory(selectedBackend);
         auto backendObjPtr = backendFactory();
-        BOOST_ASSERT(backendObjPtr);
+        ARMNN_ASSERT(backendObjPtr);
 
         backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
 
@@ -542,7 +542,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                                              BackendsMap& backends,
                                              Optional<std::vector<std::string>&> errMessages)
 {
-    BOOST_ASSERT(optNetObjPtr);
+    ARMNN_ASSERT(optNetObjPtr);
 
     OptimizationResult result;
 
@@ -553,7 +553,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
     for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
     {
         auto backendObjPtr = backends.find(selectedBackend)->second.get();
-        BOOST_ASSERT(backendObjPtr);
+        ARMNN_ASSERT(backendObjPtr);
 
         // Select sub-graphs based on backend
         SubgraphViewSelector::Subgraphs subgraphs =
@@ -576,7 +576,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
         {
             // Try to optimize the current sub-graph
             OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph);
-            BOOST_ASSERT(optimizationViews.Validate(*subgraph));
+            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
 
             // Optimization attempted, check the resulting optimized sub-graph
             for (auto& substitution : optimizationViews.GetSubstitutions())
@@ -589,7 +589,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
                 // Assign the current backend to the optimized sub-graph
                 std::for_each(replacementSubgraph.begin(), replacementSubgraph.end(), [&selectedBackend](Layer* l)
                     {
-                        BOOST_ASSERT(l);
+                        ARMNN_ASSERT(l);
                         l->SetBackendId(selectedBackend);
                     });
             }
@@ -660,7 +660,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
                                                             TensorHandleFactoryRegistry& registry)
 {
     Layer& layer = slot.GetOwningLayer();
-    BOOST_ASSERT(layer.GetType() == LayerType::Input);
+    ARMNN_ASSERT(layer.GetType() == LayerType::Input);
 
     // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
     // doesn't matter which backend it is assigned to because they all use the same implementation, which
@@ -686,7 +686,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForInput(BackendsMap& backend
         const Layer& connectedLayer = connection->GetOwningLayer();
 
         auto toBackend = backends.find(connectedLayer.GetBackendId());
-        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
 
         if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
         {
@@ -802,7 +802,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOption(BackendsMap& backends,
         const Layer& connectedLayer = connection->GetOwningLayer();
 
         auto toBackend = backends.find(connectedLayer.GetBackendId());
-        BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
 
         auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
         for (auto&& src : srcPrefs)
@@ -863,7 +863,7 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                    TensorHandleFactoryRegistry& registry)
 {
     auto toBackend = backends.find(connectedLayer.GetBackendId());
-    BOOST_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
 
     auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
 
@@ -942,11 +942,11 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
 
     optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
     {
-        BOOST_ASSERT(layer);
+        ARMNN_ASSERT(layer);
 
         // Lets make sure the backend is in our list of supported backends. Something went wrong during backend
         // assignment if this check fails
-        BOOST_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
+        ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
 
         // Check each output separately
         for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
@@ -1132,7 +1132,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     {
         auto factoryFun = BackendRegistryInstance().GetFactory(chosenBackend);
         auto backendPtr = factoryFun();
-        BOOST_ASSERT(backendPtr.get() != nullptr);
+        ARMNN_ASSERT(backendPtr.get() != nullptr);
 
         ARMNN_NO_DEPRECATE_WARN_BEGIN
         auto backendSpecificOptimizations = backendPtr->GetOptimizations();
index 75473b4..dd0affd 100644 (file)
@@ -33,7 +33,7 @@ ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>
         }
             break;
         default:
-            BOOST_ASSERT_MSG(false, "Can't quantize unsupported data type");
+            ARMNN_ASSERT_MSG(false, "Can't quantize unsupported data type");
     }
 
     TensorInfo qInfo(tensor.GetInfo().GetShape(), DataType::QAsymmU8, scale, offset);
index 303a118..dd274f9 100644 (file)
 #include <armnn/Tensor.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/ILayerVisitor.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <utility>
 #include <limits>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
 template<typename srcType>
 void QuantizeConstant(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
 {
-    BOOST_ASSERT(src);
-    BOOST_ASSERT(dst);
+    ARMNN_ASSERT(src);
+    ARMNN_ASSERT(dst);
 
     float min = std::numeric_limits<srcType>::max();
     float max = std::numeric_limits<srcType>::lowest();
index 0549a11..285da4c 100644 (file)
@@ -245,7 +245,7 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer)
             graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str());
 
         // Sets output tensor info for the debug layer.
-        BOOST_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
+        ARMNN_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
         TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
 
         debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
index 0a31f84..cfb0693 100644 (file)
@@ -28,7 +28,7 @@ void Optimizer::Pass(Graph& graph, const Optimizations& optimizations)
         --it;
         for (auto&& optimization : optimizations)
         {
-            BOOST_ASSERT(*it);
+            ARMNN_ASSERT(*it);
             optimization->Run(graph, **it);
 
             if ((*it)->IsOutputUnconnected())
index 5a542fd..973d23b 100644 (file)
@@ -9,8 +9,6 @@
 #include <backendsCommon/WorkloadDataCollector.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
index 9cfde20..352520a 100644 (file)
@@ -17,8 +17,6 @@
 #include <string>
 #include <vector>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
index d0453fe..6e5137b 100644 (file)
@@ -9,8 +9,6 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
index b1aedaa..7194064 100644 (file)
@@ -5,6 +5,7 @@
 #include "Profiling.hpp"
 
 #include <armnn/BackendId.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include "JsonPrinter.hpp"
@@ -45,7 +46,7 @@ constexpr bool g_WriteReportToStdOutOnProfilerDestruction = false;
 Measurement FindMeasurement(const std::string& name, const Event* event)
 {
 
-    BOOST_ASSERT(event != nullptr);
+    ARMNN_ASSERT(event != nullptr);
 
     // Search though the measurements.
     for (const auto& measurement : event->GetMeasurements())
@@ -63,7 +64,7 @@ Measurement FindMeasurement(const std::string& name, const Event* event)
 
 std::vector<Measurement> FindKernelMeasurements(const Event* event)
 {
-    BOOST_ASSERT(event != nullptr);
+    ARMNN_ASSERT(event != nullptr);
 
     std::vector<Measurement> measurements;
 
@@ -219,13 +220,13 @@ void Profiler::EndEvent(Event* event)
 {
     event->Stop();
 
-    BOOST_ASSERT(!m_Parents.empty());
-    BOOST_ASSERT(event == m_Parents.top());
+    ARMNN_ASSERT(!m_Parents.empty());
+    ARMNN_ASSERT(event == m_Parents.top());
     m_Parents.pop();
 
     Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
     IgnoreUnused(parent);
-    BOOST_ASSERT(event->GetParentEvent() == parent);
+    ARMNN_ASSERT(event->GetParentEvent() == parent);
 
 #if ARMNN_STREAMLINE_ENABLED
     ANNOTATE_CHANNEL_END(uint32_t(m_Parents.size()));
@@ -287,7 +288,7 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
                         JsonChildObject& parentObject,
                         std::map<const Event*, std::vector<const Event*>> descendantsMap)
 {
-    BOOST_ASSERT(parentEvent);
+    ARMNN_ASSERT(parentEvent);
     std::vector<Measurement> instrumentMeasurements = parentEvent->GetMeasurements();
     unsigned int childIdx=0;
     for(size_t measurementIndex = 0; measurementIndex < instrumentMeasurements.size(); ++measurementIndex, ++childIdx)
@@ -299,7 +300,7 @@ void ExtractJsonObjects(unsigned int inferenceIndex,
             measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
             measurementObject.SetType(JsonObjectType::Measurement);
 
-            BOOST_ASSERT(parentObject.NumChildren() == childIdx);
+            ARMNN_ASSERT(parentObject.NumChildren() == childIdx);
             parentObject.AddChild(measurementObject);
         }
 
index 8e7c45f..16e8a60 100644 (file)
@@ -24,15 +24,15 @@ QuantizerVisitor::QuantizerVisitor(const RangeTracker& rangeTracker,
 void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* srcLayer,
                                                     IConnectableLayer* quantizedLayer)
 {
-    BOOST_ASSERT(srcLayer);
+    ARMNN_ASSERT(srcLayer);
     for (unsigned int i = 0; i < srcLayer->GetNumInputSlots(); i++)
     {
         const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(i);
         const InputSlot* inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
-        BOOST_ASSERT(inputSlot);
+        ARMNN_ASSERT(inputSlot);
         const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
 
-        BOOST_ASSERT(outputSlot);
+        ARMNN_ASSERT(outputSlot);
         unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
         Layer& layerToFind = outputSlot->GetOwningLayer();
 
@@ -40,7 +40,7 @@ void QuantizerVisitor::SetQuantizedInputConnections(const IConnectableLayer* src
         if (found == m_OriginalToQuantizedGuidMap.end())
         {
             // Error in graph traversal order
-            BOOST_ASSERT_MSG(false, "Error in graph traversal");
+            ARMNN_ASSERT_MSG(false, "Error in graph traversal");
             return;
         }
 
@@ -68,13 +68,13 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa
                                                   const Optional<ConstTensor>& biases,
                                                   std::vector<int32_t>& backing)
 {
-    BOOST_ASSERT(srcLayer);
+    ARMNN_ASSERT(srcLayer);
     const IInputSlot& srcInputSlot = srcLayer->GetInputSlot(0);
     auto inputSlot = boost::polymorphic_downcast<const InputSlot*>(&srcInputSlot);
-    BOOST_ASSERT(inputSlot);
+    ARMNN_ASSERT(inputSlot);
     const OutputSlot* outputSlot = inputSlot->GetConnectedOutputSlot();
 
-    BOOST_ASSERT(outputSlot);
+    ARMNN_ASSERT(outputSlot);
     unsigned int slotIdx = outputSlot->CalculateIndexOnOwner();
     Layer& layerToFind = outputSlot->GetOwningLayer();
 
@@ -82,7 +82,7 @@ ConstTensor QuantizerVisitor::CreateQuantizedBias(const IConnectableLayer* srcLa
     if (found == m_OriginalToQuantizedGuidMap.end())
     {
         // Error in graph traversal order
-        BOOST_ASSERT_MSG(false, "Error in graph traversal");
+        ARMNN_ASSERT_MSG(false, "Error in graph traversal");
         return biases.value();
     }
 
index dfcbf85..f44606c 100644 (file)
@@ -192,7 +192,7 @@ Runtime::Runtime(const CreationOptions& options)
         try {
             auto factoryFun = BackendRegistryInstance().GetFactory(id);
             auto backend = factoryFun();
-            BOOST_ASSERT(backend.get() != nullptr);
+            ARMNN_ASSERT(backend.get() != nullptr);
 
             auto context = backend->CreateBackendContext(options);
 
index 7705e68..446485f 100644 (file)
@@ -28,10 +28,10 @@ void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessa
         IgnoreUnused(errorMessage);
 
         // Check if the item is valid
-        BOOST_ASSERT_MSG(i, errorMessage.c_str());
+        ARMNN_ASSERT_MSG(i, errorMessage.c_str());
 
         // Check if a duplicate has been found
-        BOOST_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+        ARMNN_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
 
         duplicateSet.insert(i);
     });
index 02b7bda..fa2fad9 100644 (file)
@@ -6,9 +6,9 @@
 #include "SubgraphViewSelector.hpp"
 #include "Graph.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <algorithm>
 #include <map>
 #include <queue>
@@ -80,14 +80,14 @@ public:
             for (PartialSubgraph* a : m_Antecedents)
             {
                 size_t numErased = a->m_Dependants.erase(this);
-                BOOST_ASSERT(numErased == 1);
+                ARMNN_ASSERT(numErased == 1);
                 IgnoreUnused(numErased);
                 a->m_Dependants.insert(m_Parent);
             }
             for (PartialSubgraph* a : m_Dependants)
             {
                 size_t numErased = a->m_Antecedents.erase(this);
-                BOOST_ASSERT(numErased == 1);
+                ARMNN_ASSERT(numErased == 1);
                 IgnoreUnused(numErased);
                 a->m_Antecedents.insert(m_Parent);
             }
@@ -197,7 +197,7 @@ struct LayerSelectionInfo
         for (auto&& slot = m_Layer->BeginInputSlots(); slot != m_Layer->EndInputSlots(); ++slot)
         {
             OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
-            BOOST_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
+            ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
             if (parentLayerOutputSlot)
             {
                 Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
@@ -268,7 +268,7 @@ void ForEachLayerInput(LayerSelectionInfo::LayerInfoContainer& layerInfos,
     for (auto inputSlot : layer.GetInputSlots())
     {
         auto connectedInput = boost::polymorphic_downcast<OutputSlot*>(inputSlot.GetConnection());
-        BOOST_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
+        ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
         Layer& inputLayer = connectedInput->GetOwningLayer();
 
         auto parentInfo = layerInfos.find(&inputLayer);
index aeb7ab5..4dc6f0d 100644 (file)
@@ -8,7 +8,8 @@
 #include "armnn/Exceptions.hpp"
 #include "armnn/TypesUtils.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <sstream>
@@ -252,7 +253,7 @@ float TensorInfo::GetQuantizationScale() const
         return 1.0f;
     }
 
-    BOOST_ASSERT(!HasMultipleQuantizationScales());
+    ARMNN_ASSERT(!HasMultipleQuantizationScales());
     return m_Quantization.m_Scales[0];
 }
 
index f4f857f..9e58dc8 100644 (file)
@@ -3,8 +3,8 @@
 // SPDX-License-Identifier: MIT
 //
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace
@@ -33,8 +33,8 @@ QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
     static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
     constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
     constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
-    BOOST_ASSERT(scale != 0.f);
-    BOOST_ASSERT(!std::isnan(value));
+    ARMNN_ASSERT(scale != 0.f);
+    ARMNN_ASSERT(!std::isnan(value));
 
     float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
                                   static_cast<float>(max));
@@ -47,8 +47,8 @@ template <typename QuantizedType>
 float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
 {
     static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
-    BOOST_ASSERT(scale != 0.f);
-    BOOST_ASSERT(!IsNan(value));
+    ARMNN_ASSERT(scale != 0.f);
+    ARMNN_ASSERT(!IsNan(value));
     float dequantized = boost::numeric_cast<float>(value - offset) * scale;
     return dequantized;
 }
index f67d965..490b03e 100644 (file)
@@ -36,7 +36,7 @@ void AbsLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
             "AbsLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 263fb72..d310b7e 100644 (file)
@@ -34,7 +34,7 @@ void ActivationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ActivationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index b67c42b..a990787 100644 (file)
@@ -34,7 +34,7 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape inputShape = inputShapes[0];
     auto inputNumDimensions = inputShape.GetNumDimensions();
@@ -42,7 +42,7 @@ std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<Ten
     auto axis = m_Param.m_Axis;
     auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
 
-    BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+    ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
 
     // 1D input shape results in scalar output
     if (inputShape.GetNumDimensions() == 1)
@@ -75,7 +75,7 @@ void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
             "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
index aed7447..7f61cad 100644 (file)
@@ -21,10 +21,10 @@ BatchNormalizationLayer::BatchNormalizationLayer(const armnn::BatchNormalization
 std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     // on this level constant data should not be released..
-    BOOST_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
-    BOOST_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
-    BOOST_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
-    BOOST_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
+    ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
+    ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
+    ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
+    ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
 
     BatchNormalizationQueueDescriptor descriptor;
 
@@ -54,7 +54,7 @@ void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "BatchNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 7e70452..1da88c6 100644 (file)
@@ -47,7 +47,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "BatchToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -56,7 +56,7 @@ void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
 
 std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     const TensorShape& inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -66,7 +66,7 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
                                                          1U,
                                                          std::multiplies<>());
 
-    BOOST_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
+    ARMNN_ASSERT(inputShape[0] % accumulatedBlockShape == 0);
 
     outputShape[0] = inputShape[0] / accumulatedBlockShape;
 
@@ -80,10 +80,10 @@ std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vecto
     unsigned int outputHeight = inputShape[heightIndex] * m_Param.m_BlockShape[0];
     unsigned int outputWidth = inputShape[widthIndex] * m_Param.m_BlockShape[1];
 
-    BOOST_ASSERT_MSG(heightCrop <= outputHeight,
+    ARMNN_ASSERT_MSG(heightCrop <= outputHeight,
         "BatchToSpaceLayer: Overall height crop should be less than or equal to the uncropped output height.");
 
-    BOOST_ASSERT_MSG(widthCrop <= outputWidth,
+    ARMNN_ASSERT_MSG(widthCrop <= outputWidth,
         "BatchToSpaceLayer: Overall width crop should be less than or equal to the uncropped output width.");
 
     outputShape[heightIndex] = outputHeight - heightCrop;
index 1f6e35f..9108045 100644 (file)
@@ -33,11 +33,11 @@ ComparisonLayer* ComparisonLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     const TensorShape& input0 = inputShapes[0];
     const TensorShape& input1 = inputShapes[1];
 
-    BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
     unsigned int numDims = input0.GetNumDimensions();
 
     std::vector<unsigned int> dims(numDims);
@@ -46,7 +46,7 @@ std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<Te
         unsigned int dim0 = input0[i];
         unsigned int dim1 = input1[i];
 
-        BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
                          "Dimensions should either match or one should be of size 1.");
 
         dims[i] = std::max(dim0, dim1);
@@ -63,7 +63,7 @@ void ComparisonLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
         GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
     });
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ComparisonLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index f4024af..5df5ec8 100644 (file)
@@ -111,7 +111,7 @@ void ConcatLayer::CreateTensors(const FactoryType& factory)
                 OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
                 OutputHandler& outputHandler = slot->GetOutputHandler();
 
-                BOOST_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
+                ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
                 outputHandler.SetData(std::move(subTensor));
 
                 Layer& inputLayer = slot->GetOwningLayer();
@@ -141,7 +141,7 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
-        BOOST_ASSERT(handleFactory);
+        ARMNN_ASSERT(handleFactory);
         CreateTensors(*handleFactory);
     }
 }
@@ -153,7 +153,7 @@ ConcatLayer* ConcatLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+    ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
 
     unsigned int numDims = m_Param.GetNumDimensions();
     for (unsigned int i=0; i< inputShapes.size(); i++)
@@ -259,7 +259,7 @@ void ConcatLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes(inputShapes);
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ConcatLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 147aa8f..30d20b8 100644 (file)
@@ -36,7 +36,7 @@ void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 7873c94..08f0e4a 100644 (file)
@@ -36,7 +36,7 @@ void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 936acf6..c9e0962 100644 (file)
@@ -36,7 +36,7 @@ void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index bbf4dbf..95403e9 100644 (file)
@@ -35,7 +35,7 @@ void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 55a243a..d82908a 100644 (file)
@@ -49,7 +49,7 @@ void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn
 std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     // on this level constant data should not be released..
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
 
     Convolution2dQueueDescriptor descriptor;
 
@@ -57,7 +57,7 @@ std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFac
 
     if (m_Param.m_BiasEnabled)
     {
-        BOOST_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
+        ARMNN_ASSERT_MSG(m_Bias != nullptr, "Convolution2dLayer: Bias data should not be null.");
         descriptor.m_Bias = m_Bias.get();
     }
     return factory.CreateConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
@@ -79,12 +79,12 @@ Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape filterShape = inputShapes[1];
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -117,13 +117,13 @@ void Convolution2dLayer::ValidateTensorShapesFromInputs()
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     // check if we m_Weight data is not nullptr
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
         m_Weight->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "Convolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 76d33f2..6aaf945 100644 (file)
@@ -41,7 +41,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "DebugLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index bb74232..2d13271 100644 (file)
@@ -38,7 +38,7 @@ DepthToSpaceLayer* DepthToSpaceLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -64,7 +64,7 @@ void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "DepthToSpaceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index f37096a..dc6b2c2 100644 (file)
@@ -51,7 +51,7 @@ void DepthwiseConvolution2dLayer::SerializeLayerParameters(ParameterStringifyFun
 std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     // on this level constant data should not be released..
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
 
     DepthwiseConvolution2dQueueDescriptor descriptor;
 
@@ -59,7 +59,7 @@ std::unique_ptr<IWorkload> DepthwiseConvolution2dLayer::CreateWorkload(const IWo
 
     if (m_Param.m_BiasEnabled)
     {
-        BOOST_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
+        ARMNN_ASSERT_MSG(m_Bias != nullptr, "DepthwiseConvolution2dLayer: Bias data should not be null.");
         descriptor.m_Bias = m_Bias.get();
     }
     return factory.CreateDepthwiseConvolution2d(descriptor, PrepInfoAndDesc(descriptor));
@@ -81,11 +81,11 @@ DepthwiseConvolution2dLayer* DepthwiseConvolution2dLayer::Clone(Graph& graph) co
 std::vector<TensorShape>
 DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     const TensorShape& inputShape  = inputShapes[0];
     const TensorShape& filterShape = inputShapes[1];
 
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -124,14 +124,14 @@ void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     // on this level constant data should not be released..
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
         m_Weight->GetTensorInfo().GetShape()
      });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "DepthwiseConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 00a1d69..5b57279 100644 (file)
@@ -36,7 +36,7 @@ void DequantizeLayer::ValidateTensorShapesFromInputs()
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 8749b33..e8d14d9 100644 (file)
@@ -39,9 +39,9 @@ void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
     VerifyLayerConnections(2, CHECK_LOCATION());
 
     // on this level constant data should not be released.
-    BOOST_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+    ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
 
-    BOOST_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+    ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
 
     unsigned int detectedBoxes = m_Param.m_MaxDetections * m_Param.m_MaxClassesPerDetection;
 
index 7618141..2c1e871 100644 (file)
@@ -8,8 +8,7 @@
 #include "InternalTypes.hpp"
 #include "armnn/Exceptions.hpp"
 #include <armnn/TypesUtils.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -22,12 +21,12 @@ ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned
 
 std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     auto& input0 = inputShapes[0];
     auto& input1 = inputShapes[1];
 
     // Get the max of the inputs.
-    BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
     unsigned int numDims = input0.GetNumDimensions();
     std::vector<unsigned int> dims(numDims);
 
@@ -38,7 +37,7 @@ std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vect
 
 #if !NDEBUG
         // Validate inputs are broadcast compatible.
-        BOOST_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
+        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
                          "Dimensions should either match or one should be of size 1.");
 #endif
 
@@ -57,7 +56,7 @@ void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
     });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     std::string msg = GetLayerTypeAsCString(GetType());
     msg += "Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.";
index d3843da..c91057c 100644 (file)
@@ -34,7 +34,7 @@ ElementwiseUnaryLayer* ElementwiseUnaryLayer::Clone(Graph& graph) const
 std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     // Should return the shape of the input tensor
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
     const TensorShape& input = inputShapes[0];
 
     return std::vector<TensorShape>({ input });
@@ -46,7 +46,7 @@ void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ElementwiseUnaryLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 8611b9b..2b4ad86 100644 (file)
@@ -35,7 +35,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 148543c..fb918f6 100644 (file)
@@ -35,7 +35,7 @@ void FloorLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "FloorLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 6b36bad..4bbc9ba 100644 (file)
@@ -22,14 +22,14 @@ FullyConnectedLayer::FullyConnectedLayer(const FullyConnectedDescriptor& param,
 std::unique_ptr<IWorkload> FullyConnectedLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     // on this level constant data should not be released..
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
 
     FullyConnectedQueueDescriptor descriptor;
 
     descriptor.m_Weight = m_Weight.get();
     if (m_Param.m_BiasEnabled)
     {
-        BOOST_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
+        ARMNN_ASSERT_MSG(m_Bias != nullptr, "FullyConnectedLayer: Bias data should not be null.");
         descriptor.m_Bias = m_Bias.get();
     }
     return factory.CreateFullyConnected(descriptor, PrepInfoAndDesc(descriptor));
@@ -50,7 +50,7 @@ FullyConnectedLayer* FullyConnectedLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape weightShape = inputShapes[1];
 
@@ -66,13 +66,13 @@ void FullyConnectedLayer::ValidateTensorShapesFromInputs()
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     // check if we m_Weight data is not nullptr
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
         m_Weight->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "FullyConnectedLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 9e0212f..25b133a 100644 (file)
@@ -35,7 +35,7 @@ void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "InstanceNormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 3d9dc53..e6d5f06 100644 (file)
@@ -35,7 +35,7 @@ void L2NormalizationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "L2NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 24b6fde..627aa4c 100644 (file)
@@ -34,7 +34,7 @@ void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
     VerifyLayerConnections(1, CHECK_LOCATION());
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "LogSoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 1d94569..653b18a 100644 (file)
@@ -147,7 +147,7 @@ LstmLayer* LstmLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 3);
+    ARMNN_ASSERT(inputShapes.size() == 3);
 
     // Get input values for validation
     unsigned int batchSize = inputShapes[0][0];
@@ -173,35 +173,35 @@ void LstmLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()}
     );
 
-    BOOST_ASSERT(inferredShapes.size() == 4);
+    ARMNN_ASSERT(inferredShapes.size() == 4);
 
     // Check if the weights are nullptr
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
                      "LstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
                      "LstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
                      "LstmLayer: m_BasicParameters.m_CellBias should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
                      "LstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
 
     if (!m_Param.m_CifgEnabled)
     {
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
                          "LstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
                          "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                          "LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
 
         ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -211,11 +211,11 @@ void LstmLayer::ValidateTensorShapesFromInputs()
     }
     else
     {
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
             "LstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
             "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value when CIFG is enabled.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
             "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
 
         ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -226,7 +226,7 @@ void LstmLayer::ValidateTensorShapesFromInputs()
 
     if (m_Param.m_ProjectionEnabled)
     {
-        BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
                          "LstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
     }
 
@@ -234,13 +234,13 @@ void LstmLayer::ValidateTensorShapesFromInputs()
     {
         if (!m_Param.m_CifgEnabled)
         {
-            BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
                              "LstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
                              "when Peephole is enabled and CIFG is disabled.");
         }
-        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
                          "LstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
-        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
                          "LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
     }
 
@@ -261,14 +261,14 @@ void LstmLayer::ValidateTensorShapesFromInputs()
     {
         if(!m_Param.m_CifgEnabled)
         {
-            BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
                              "LstmLayer: m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
         }
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
                          "LstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
                          "LstmLayer: m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
                          "LstmLayer: m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
     }
 }
index 30b88fa..5fa88f9 100644 (file)
@@ -44,7 +44,7 @@ void MeanLayer::ValidateTensorShapesFromInputs()
 
     const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
 
-    BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
+    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
                      "MeanLayer: Mean supports up to 4D input.");
 
     unsigned int rank = input.GetNumDimensions();
index cf69c17..e4009de 100644 (file)
@@ -39,7 +39,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "MemCopyLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 80f9fda..bcccba1 100644 (file)
@@ -39,7 +39,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "MemImportLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index f2fd29f..ad7d8b1 100644 (file)
@@ -36,7 +36,7 @@ void MergeLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape(),
     });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "MergeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -46,7 +46,7 @@ void MergeLayer::ValidateTensorShapesFromInputs()
 
 std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "MergeLayer: TensorShapes set on inputs do not match",
index 09f8a0d..44179fd 100644 (file)
@@ -35,7 +35,7 @@ void NormalizationLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "NormalizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 0fc3ce4..e565b48 100644 (file)
@@ -35,7 +35,7 @@ PermuteLayer* PermuteLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
     const TensorShape& inShape = inputShapes[0];
     return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
 }
@@ -46,7 +46,7 @@ void PermuteLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "PermuteLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index a3c2425..ad2c82f 100644 (file)
@@ -37,12 +37,12 @@ Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
     const TensorShape& inputShape = inputShapes[0];
     const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
 
     unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
     unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -54,7 +54,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
     unsigned int outHeight = 1;
     if (!isGlobalPooling)
     {
-        BOOST_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
+        ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
                          "Stride can only be zero when performing global pooling");
 
         auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
@@ -72,7 +72,7 @@ std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<Ten
                         size = static_cast<unsigned int>(floor(div)) + 1;
                         break;
                     default:
-                        BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+                        ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
                 }
 
                 // MakeS sure that border operations will start from inside the input and not the padded area.
@@ -106,7 +106,7 @@ void Pooling2dLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index d9e5922..6094806 100644 (file)
@@ -34,7 +34,7 @@ PreluLayer* PreluLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
 
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape& alphaShape = inputShapes[1];
@@ -42,8 +42,8 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
     const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
     const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
 
-    BOOST_ASSERT(inputShapeDimensions > 0);
-    BOOST_ASSERT(alphaShapeDimensions > 0);
+    ARMNN_ASSERT(inputShapeDimensions > 0);
+    ARMNN_ASSERT(alphaShapeDimensions > 0);
 
     // The size of the output is the maximum size along each dimension of the input operands,
     // it starts with the trailing dimensions, and works its way forward
@@ -63,7 +63,7 @@ std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorS
         unsigned int alphaDimension = alphaShape[boost::numeric_cast<unsigned int>(alphaShapeIndex)];
 
         // Check that the inputs are broadcast compatible
-        BOOST_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
+        ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
                          "PreluLayer: Dimensions should either match or one should be of size 1");
 
         outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
@@ -104,7 +104,7 @@ void PreluLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
     });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "PreluLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 393a702..9b940c1 100644 (file)
@@ -150,7 +150,7 @@ QLstmLayer* QLstmLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 3);
+    ARMNN_ASSERT(inputShapes.size() == 3);
 
     // Get input values for validation
     unsigned int batchSize = inputShapes[0][0];
@@ -176,35 +176,35 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape() //  previousCellStateIn
     });
 
-    BOOST_ASSERT(inferredShapes.size() == 3);
+    ARMNN_ASSERT(inferredShapes.size() == 3);
 
     // Check if the weights are nullptr for basic params
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
             "QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
             "QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
             "QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
-    BOOST_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
             "QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
 
     if (!m_Param.m_CifgEnabled)
     {
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
                 "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
 
         ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -214,12 +214,12 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
     }
     else
     {
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
                 "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
                              "not have a value when CIFG is enabled.");
-        BOOST_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
+        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
                 "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
 
         ConditionalThrowIfNotEqual<LayerValidationException>(
@@ -230,23 +230,23 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
 
     if (m_Param.m_ProjectionEnabled)
     {
-        BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
                          "QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
-        BOOST_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
+        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionBias != nullptr,
                          "QLstmLayer: m_ProjectionParameters.m_ProjectionBias should not be null.");
     }
 
     if (m_Param.m_PeepholeEnabled)
     {
         if (!m_Param.m_CifgEnabled) {
-            BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
+            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
                     "QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
                     "when Peephole is enabled and CIFG is disabled.");
         }
 
-        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
                          "QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
-        BOOST_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
                          "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
     }
 
@@ -263,14 +263,14 @@ void QLstmLayer::ValidateTensorShapesFromInputs()
     {
         if(!m_Param.m_CifgEnabled)
         {
-            BOOST_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
+            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
                              "QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
         }
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
                          "QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
                          "QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
-        BOOST_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
+        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
                          "QLstmLayer: m_LayerNormParameters.m_UutputLayerNormWeights should not be null.");
     }
 }
index 8717041..b56ae3f 100644 (file)
@@ -78,7 +78,7 @@ QuantizedLstmLayer* QuantizedLstmLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 3);
+    ARMNN_ASSERT(inputShapes.size() == 3);
 
     // Get input values for validation
     unsigned int numBatches = inputShapes[0][0];
@@ -102,34 +102,34 @@ void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
         GetInputSlot(2).GetConnection()->GetTensorInfo().GetShape()  // previousOutputIn
     });
 
-    BOOST_ASSERT(inferredShapes.size() == 2);
+    ARMNN_ASSERT(inferredShapes.size() == 2);
 
     // Check weights and bias for nullptr
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
 
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
 
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
-    BOOST_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
+    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
                      "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
 
     // Check output TensorShape(s) match inferred shape
index fbf3eaa..b496dbb 100644 (file)
@@ -42,7 +42,7 @@ void ReshapeLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({  });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ReshapeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index e341191..9654e58 100644 (file)
@@ -36,7 +36,7 @@ ResizeLayer* ResizeLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     const TensorShape& inputShape = inputShapes[0];
     const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
@@ -59,7 +59,7 @@ void ResizeLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "ResizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 6ff7372..dfd466d 100644 (file)
@@ -36,7 +36,7 @@ void RsqrtLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
             "RsqrtLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index ec82082..d92ed6f 100644 (file)
@@ -12,7 +12,6 @@
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -40,7 +39,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
             "SliceLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
@@ -51,7 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
 std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     IgnoreUnused(inputShapes);
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
 
index cb70bbc..738347c 100644 (file)
@@ -35,7 +35,7 @@ void SoftmaxLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "SoftmaxLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index ec724ba..ce48b5b 100644 (file)
@@ -41,7 +41,7 @@ SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -73,7 +73,7 @@ void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 8aa0c9f..bf65240 100644 (file)
@@ -41,7 +41,7 @@ SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -66,7 +66,7 @@ void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "SpaceToDepthLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index f655e71..8ec8121 100644 (file)
@@ -115,7 +115,7 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
-        BOOST_ASSERT(handleFactory);
+        ARMNN_ASSERT(handleFactory);
         CreateTensors(*handleFactory);
     }
 }
@@ -128,7 +128,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     IgnoreUnused(inputShapes);
-    BOOST_ASSERT(inputShapes.size() ==  m_Param.GetNumViews());
+    ARMNN_ASSERT(inputShapes.size() ==  m_Param.GetNumViews());
     std::vector<TensorShape> outShapes;
     //Output shapes must match View shapes.
     for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
@@ -150,7 +150,7 @@ void SplitterLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes(views);
 
-    BOOST_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
+    ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
 
     for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
     {
index 6f793ca..e034cb4 100644 (file)
@@ -38,7 +38,7 @@ std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorS
     const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
     const unsigned int axis = m_Param.m_Axis;
 
-    BOOST_ASSERT(axis <= inputNumDimensions);
+    ARMNN_ASSERT(axis <= inputNumDimensions);
 
     std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
     for (unsigned int i = 0; i < axis; ++i)
@@ -84,7 +84,7 @@ void StackLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes(inputShapes);
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "StackLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index c31b9a4..b100f7a 100644 (file)
@@ -45,7 +45,7 @@ StridedSliceLayer* StridedSliceLayer::Clone(Graph& graph) const
 std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
     const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
 
     TensorShape inputShape = inputShapes[0];
     std::vector<unsigned int> outputShape;
@@ -86,7 +86,7 @@ void StridedSliceLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
                     "StridedSlice: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 4cacda6..c4b065a 100644 (file)
@@ -31,14 +31,14 @@ void SwitchLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
-    BOOST_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
+    ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
 
     // Assuming first input is the Input and second input is the Constant
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
         GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 2);
+    ARMNN_ASSERT(inferredShapes.size() == 2);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "SwitchLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index dca77b4..05941f7 100644 (file)
@@ -26,14 +26,14 @@ TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolut
 
 std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
 
     TransposeConvolution2dQueueDescriptor descriptor;
     descriptor.m_Weight = m_Weight.get();
 
     if (m_Param.m_BiasEnabled)
     {
-        BOOST_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+        ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
         descriptor.m_Bias = m_Bias.get();
     }
 
@@ -57,11 +57,11 @@ TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) co
 std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
     const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 2);
+    ARMNN_ASSERT(inputShapes.size() == 2);
     const TensorShape& inputShape  = inputShapes[0];
     const TensorShape& kernelShape = inputShapes[1];
 
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -82,8 +82,8 @@ std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
     unsigned int kernelElements = kernelShape[0] * kernelShape[dataLayoutIndex.GetChannelsIndex()];
     unsigned int inputElements  = batches * inputShape[dataLayoutIndex.GetChannelsIndex()];
 
-    BOOST_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
-    BOOST_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
+    ARMNN_ASSERT_MSG(inputElements != 0, "Invalid number of input elements");
+    ARMNN_ASSERT_MSG(kernelElements % inputElements == 0, "Invalid number of elements");
 
     unsigned int channels =  kernelElements / inputElements;
 
@@ -98,13 +98,13 @@ void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
 
-    BOOST_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
 
     auto inferredShapes = InferOutputShapes({
          GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
          m_Weight->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "TransposeConvolution2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index 3c22b54..c058332 100644 (file)
@@ -35,7 +35,7 @@ TransposeLayer* TransposeLayer::Clone(Graph& graph) const
 
 std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    BOOST_ASSERT(inputShapes.size() == 1);
+    ARMNN_ASSERT(inputShapes.size() == 1);
     const TensorShape& inShape = inputShapes[0];
     return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
 }
@@ -46,7 +46,7 @@ void TransposeLayer::ValidateTensorShapesFromInputs()
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
 
-    BOOST_ASSERT(inferredShapes.size() == 1);
+    ARMNN_ASSERT(inferredShapes.size() == 1);
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "TransposeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
index b2a2ba4..e598deb 100644 (file)
@@ -21,8 +21,8 @@ public:
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
         Layer& child = connection.GetOwningLayer();
 
-        BOOST_ASSERT(base.GetType() == LayerType::Pad);
-        BOOST_ASSERT(child.GetType() == LayerType::Convolution2d);
+        ARMNN_ASSERT(base.GetType() == LayerType::Pad);
+        ARMNN_ASSERT(child.GetType() == LayerType::Convolution2d);
 
         PadLayer* padLayer = boost::polymorphic_downcast<PadLayer*>(&base);
         Convolution2dLayer* convolution2dLayer = boost::polymorphic_downcast<Convolution2dLayer*>(&child);
@@ -60,12 +60,12 @@ public:
         newConv2dLayer.GetOutputHandler().SetTensorInfo(outInfo);
 
         // Copy weights and bias to the new convolution layer
-        BOOST_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
+        ARMNN_ASSERT_MSG(convolution2dLayer->m_Weight != nullptr,
                          "FoldPadIntoConvolution2d: Weights data should not be null.");
         newConv2dLayer.m_Weight = std::move(convolution2dLayer->m_Weight);
         if (descriptor.m_BiasEnabled)
         {
-            BOOST_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
+            ARMNN_ASSERT_MSG(convolution2dLayer->m_Bias != nullptr,
                              "FoldPadIntoConvolution2d: Bias data should not be null if bias is enabled.");
             newConv2dLayer.m_Bias = std::move(convolution2dLayer->m_Bias);
         }
index 53d4a3c..39bfe6e 100644 (file)
@@ -21,8 +21,8 @@ public:
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
         Layer& child = connection.GetOwningLayer();
 
-        BOOST_ASSERT(base.GetType() == LayerType::Reshape);
-        BOOST_ASSERT(child.GetType() == LayerType::Reshape);
+        ARMNN_ASSERT(base.GetType() == LayerType::Reshape);
+        ARMNN_ASSERT(child.GetType() == LayerType::Reshape);
 
         OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();
 
index 3ea4a5b..d479445 100644 (file)
@@ -24,7 +24,7 @@ public:
         Layer& base  = connection.GetConnectedOutputSlot()->GetOwningLayer();
         Layer& child = connection.GetOwningLayer();
 
-        BOOST_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
+        ARMNN_ASSERT((base.GetType() == LayerType::ConvertFp16ToFp32 &&
                      child.GetType() == LayerType::ConvertFp32ToFp16) ||
                      (base.GetType() == LayerType::ConvertFp32ToFp16 &&
                      child.GetType() == LayerType::ConvertFp16ToFp32));
index 21aed86..ea4de9d 100644 (file)
@@ -22,7 +22,7 @@ public:
     {
         // Validate base layer (the Permute) is compatible
         Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
-        BOOST_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
+        ARMNN_ASSERT(base.GetType() == LayerType::Permute || base.GetType() == LayerType::Transpose);
         const TensorInfo& inputInfo = base.GetInputSlot(0).GetConnection()->GetTensorInfo();
         const TensorInfo& intermediateInfo = base.GetOutputSlot(0).GetTensorInfo();
         if (intermediateInfo.GetNumDimensions() != 4)
@@ -39,7 +39,7 @@ public:
 
         // Validate child layer (the BatchToSpace) is compatible
         Layer& child = connection.GetOwningLayer();
-        BOOST_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
+        ARMNN_ASSERT(child.GetType() == LayerType::BatchToSpaceNd);
         const TensorInfo& outputInfo = child.GetOutputSlot(0).GetTensorInfo();
         const BatchToSpaceNdDescriptor& batchToSpaceDesc = static_cast<BatchToSpaceNdLayer&>(child).GetParameters();
         if (batchToSpaceDesc.m_DataLayout != DataLayout::NHWC)
index a7b23db..c7883ff 100644 (file)
@@ -203,8 +203,8 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
     {
         if(layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
         }
     }
 
@@ -223,18 +223,18 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
     {
         if (layer->GetType()==LayerType::Floor || layer->GetType() == LayerType::Addition)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
         }
         else if (layer->GetType() == LayerType::ConvertFp16ToFp32)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float16);
         }
         else if (layer->GetType() == LayerType::ConvertFp32ToFp16)
         {
-            BOOST_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
-            BOOST_ASSERT(layer->GetDataType() == DataType::Float32);
+            ARMNN_ASSERT(layer->GetOutputSlot(0).GetTensorInfo().GetDataType() == DataType::Float16);
+            ARMNN_ASSERT(layer->GetDataType() == DataType::Float32);
         }
     }
 
index ef9b2da..ebdfbc5 100644 (file)
@@ -336,7 +336,7 @@ TensorInfo GetInputTensorInfo(const Network* network)
 {
     for (auto&& inputLayer : network->GetGraph().GetInputLayers())
     {
-        BOOST_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
         return inputLayer->GetOutputSlot(0).GetTensorInfo();
     }
     throw InvalidArgumentException("Network has no input layers");
index 3f85893..ca148ed 100644 (file)
@@ -5,10 +5,10 @@
 #pragma once
 
 #include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <QuantizeHelper.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/multi_array.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/random/uniform_real_distribution.hpp>
@@ -192,7 +192,7 @@ boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo)
 template <typename T, std::size_t n>
 boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat)
 {
-    BOOST_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
+    ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
 
     std::array<unsigned int, n> shape;
 
index 8ef820b..6d7d02d 100644 (file)
@@ -5,15 +5,15 @@
 
 #include "TestUtils.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnn;
 
 void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
              unsigned int fromIndex, unsigned int toIndex)
 {
-    BOOST_ASSERT(from);
-    BOOST_ASSERT(to);
+    ARMNN_ASSERT(from);
+    ARMNN_ASSERT(to);
 
     from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
     from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
index ce5c5bd..b95d3bc 100644 (file)
@@ -13,8 +13,9 @@
 #include "GraphTopologicalSort.hpp"
 #include "VerificationHelpers.hpp"
 
+#include <armnn/utility/Assert.hpp>
+
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 // Caffe
@@ -363,7 +364,7 @@ vector<const LayerParameter*> CaffeParserBase::GetInputs(const LayerParameter& l
 
 void CaffeParserBase::ParseInputLayer(const LayerParameter& layerParam)
 {
-    BOOST_ASSERT(layerParam.type() == "Input");
+    ARMNN_ASSERT(layerParam.type() == "Input");
     ValidateNumInputsOutputs(layerParam, 0, 1);
 
     const InputParameter& param = layerParam.input_param();
@@ -421,7 +422,7 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
                                              unsigned int kernelW,
                                              unsigned int kernelH)
 {
-    BOOST_ASSERT(layerParam.type() == "Convolution");
+    ARMNN_ASSERT(layerParam.type() == "Convolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
     ConvolutionParameter convParam = layerParam.convolution_param();
@@ -429,8 +430,8 @@ void CaffeParserBase::AddConvLayerWithSplits(const caffe::LayerParameter& layerP
     const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
 
     // asusme these were already verified by the caller ParseConvLayer() function
-    BOOST_ASSERT(numGroups < inputShape.dim(1));
-    BOOST_ASSERT(numGroups > 1);
+    ARMNN_ASSERT(numGroups < inputShape.dim(1));
+    ARMNN_ASSERT(numGroups > 1);
 
     // Handle grouping
     armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
@@ -613,7 +614,7 @@ void CaffeParserBase::AddConvLayerWithDepthwiseConv(const caffe::LayerParameter&
                                                     unsigned int kernelW,
                                                     unsigned int kernelH)
 {
-    BOOST_ASSERT(layerParam.type() == "Convolution");
+    ARMNN_ASSERT(layerParam.type() == "Convolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
     ConvolutionParameter convParam  = layerParam.convolution_param();
@@ -711,7 +712,7 @@ void CaffeParserBase::ParseConvLayer(const LayerParameter& layerParam)
     // Not Available ArmNN Interface Parameters
     // * Rounding policy;
 
-    BOOST_ASSERT(layerParam.type() == "Convolution");
+    ARMNN_ASSERT(layerParam.type() == "Convolution");
     ValidateNumInputsOutputs(layerParam, 1, 1);
 
     ConvolutionParameter convParam = layerParam.convolution_param();
index 58232a2..2975675 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <armnnUtils/Permute.hpp>
 #include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <ParserHelper.hpp>
@@ -20,7 +21,6 @@
 
 #include <boost/filesystem.hpp>
 #include <boost/format.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
@@ -725,7 +725,7 @@ Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryCo
 INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
 {
     m_Network = INetwork::Create();
-    BOOST_ASSERT(graph != nullptr);
+    ARMNN_ASSERT(graph != nullptr);
     unsigned int layerIndex = 0;
     for (AnyLayer const* layer : *graph->layers())
     {
@@ -883,7 +883,7 @@ void Deserializer::SetupInputLayers(GraphPtr graph)
 
         // GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
         LayerBindingId bindingId = GetBindingLayerInfo(graph, inputLayerIndex);
-        BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+        ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
 
         IConnectableLayer* inputLayer =
             m_Network->AddInputLayer(bindingId, baseLayer->layerName()->c_str());
@@ -922,7 +922,7 @@ void Deserializer::SetupOutputLayers(GraphPtr graph)
 
         // GetBindingLayerInfo expect the index to be index in the vector not index property on each layer base
         LayerBindingId bindingId = GetBindingLayerInfo(graph, outputLayerIndex);
-        BOOST_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
+        ARMNN_ASSERT_MSG(baseLayer->layerName()->c_str(), "Input has no name.");
 
         IConnectableLayer* outputLayer =
             m_Network->AddOutputLayer(bindingId, baseLayer->layerName()->c_str());
@@ -944,7 +944,7 @@ void Deserializer::RegisterOutputSlots(GraphPtr graph,
                                        IConnectableLayer* layer)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
     if (baseLayer->outputSlots()->size() != layer->GetNumOutputSlots())
     {
@@ -971,7 +971,7 @@ void Deserializer::RegisterInputSlots(GraphPtr graph,
                                       armnn::IConnectableLayer* layer)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     LayerBaseRawPtr baseLayer = GetBaseLayer(graph, layerIndex);
     if (baseLayer->inputSlots()->size() != layer->GetNumInputSlots())
     {
@@ -1845,7 +1845,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported pooling algorithm");
+            ARMNN_ASSERT_MSG(false, "Unsupported pooling algorithm");
         }
     }
 
@@ -1863,7 +1863,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported output shape rounding");
+            ARMNN_ASSERT_MSG(false, "Unsupported output shape rounding");
         }
     }
 
@@ -1881,7 +1881,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported padding method");
+            ARMNN_ASSERT_MSG(false, "Unsupported padding method");
         }
     }
 
@@ -1899,7 +1899,7 @@ armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::Pool
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported data layout");
+            ARMNN_ASSERT_MSG(false, "Unsupported data layout");
         }
     }
 
@@ -2197,7 +2197,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported normalization channel type");
+            ARMNN_ASSERT_MSG(false, "Unsupported normalization channel type");
         }
     }
 
@@ -2215,7 +2215,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported normalization method type");
+            ARMNN_ASSERT_MSG(false, "Unsupported normalization method type");
         }
     }
 
@@ -2233,7 +2233,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported data layout");
+            ARMNN_ASSERT_MSG(false, "Unsupported data layout");
         }
     }
 
index 91d07f3..bb38d5f 100644 (file)
 #include <ArmnnSchema_generated.h>
 #include <armnn/IRuntime.hpp>
 #include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <ResolveType.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 
@@ -96,10 +96,10 @@ struct ParserFlatbuffersSerializeFixture
         flatbuffers::Parser parser;
 
         bool ok = parser.Parse(schemafile.c_str());
-        BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+        ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
 
         ok &= parser.Parse(m_JsonString.c_str());
-        BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+        ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
 
         if (!ok)
         {
index e425998..455bd87 100644 (file)
@@ -5,6 +5,7 @@
 #include "OnnxParser.hpp"
 
 #include <armnn/Descriptors.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <VerificationHelpers.hpp>
 
 #include <boost/format.hpp>
@@ -388,7 +389,7 @@ std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> o
                                                        const IConnectableLayer* layer,
                                                        std::vector<TensorShape> inputShapes)
 {
-    BOOST_ASSERT(! outNames.empty());
+    ARMNN_ASSERT(! outNames.empty());
     bool needCompute = std::any_of(outNames.begin(),
                                    outNames.end(),
                                    [this](std::string name)
@@ -401,7 +402,7 @@ std::vector<TensorInfo> OnnxParser::ComputeOutputInfo(std::vector<std::string> o
      if(needCompute)
      {
          inferredShapes = layer->InferOutputShapes(inputShapes);
-         BOOST_ASSERT(inferredShapes.size() == outNames.size());
+         ARMNN_ASSERT(inferredShapes.size() == outNames.size());
      }
      for (uint i = 0; i < outNames.size(); ++i)
      {
@@ -607,7 +608,7 @@ INetworkPtr OnnxParser::CreateNetworkFromModel(onnx::ModelProto& model)
 
 void OnnxParser::LoadGraph()
 {
-    BOOST_ASSERT(m_Graph.get() != nullptr);
+    ARMNN_ASSERT(m_Graph.get() != nullptr);
 
     //Fill m_TensorsInfo with the shapes and value of every tensor
     SetupInfo(m_Graph->mutable_output());
@@ -851,7 +852,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
                                                   CreateConstTensor(weightName).first,
                                                   Optional<ConstTensor>(CreateConstTensor(biasName).first),
                                                   matmulNode.name().c_str());
-        BOOST_ASSERT(layer != nullptr);
+        ARMNN_ASSERT(layer != nullptr);
 
         auto outputInfo = ComputeOutputInfo({addNode->output(0)}, layer,
                                             {m_TensorsInfo[inputName].m_info->GetShape(),
@@ -868,7 +869,7 @@ void OnnxParser::AddFullyConnected(const onnx::NodeProto& matmulNode, const onnx
                                                   CreateConstTensor(weightName).first,
                                                   EmptyOptional(),
                                                   matmulNode.name().c_str());
-        BOOST_ASSERT(layer != nullptr);
+        ARMNN_ASSERT(layer != nullptr);
 
         auto outputInfo = ComputeOutputInfo({matmulNode.output(0)}, layer,
                                             {m_TensorsInfo[inputName].m_info->GetShape(),
@@ -932,7 +933,7 @@ void OnnxParser::ParseGlobalAveragePool(const onnx::NodeProto& node)
     desc.m_PoolHeight = inputShape[2];
 
     IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {inputShape});
     layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1026,7 +1027,7 @@ void OnnxParser::AddPoolingLayer(const onnx::NodeProto& node, Pooling2dDescripto
     }
 
     IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, node.name().c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
     layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1048,7 +1049,7 @@ void OnnxParser::CreateReshapeLayer(const std::string& inputName,
     reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
 
     IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // register the input connection slots for the layer, connections are made after all layers have been created
@@ -1121,7 +1122,7 @@ void OnnxParser::ParseActivation(const onnx::NodeProto& node, const armnn::Activ
     }
 
     IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, node.name().c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
     layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1161,7 +1162,7 @@ void OnnxParser::ParseLeakyRelu(const onnx::NodeProto& node)
 
 void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, const Convolution2dDescriptor& convDesc)
 {
-    BOOST_ASSERT(node.op_type() == "Conv");
+    ARMNN_ASSERT(node.op_type() == "Conv");
 
     DepthwiseConvolution2dDescriptor desc;
     desc.m_PadLeft      = convDesc.m_PadLeft;
@@ -1203,7 +1204,7 @@ void OnnxParser::AddConvLayerWithDepthwiseConv(const onnx::NodeProto& node, cons
                                                           EmptyOptional(),
                                                           node.name().c_str());
     }
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                         { m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1403,7 +1404,7 @@ void OnnxParser::ParseConv(const onnx::NodeProto& node)
                                                  EmptyOptional(),
                                                  node.name().c_str());
     }
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                         { m_TensorsInfo[node.input(0)].m_info->GetShape(),
@@ -1494,7 +1495,7 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
      auto inputs = AddPrepareBroadcast(node.input(0), node.input(1));
      auto input0 = *m_TensorsInfo[inputs.first].m_info;
      auto input1 = *m_TensorsInfo[inputs.second].m_info;
-     BOOST_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+     ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
 
      unsigned int numDims = input0.GetNumDimensions();
      for (unsigned int i = 0; i < numDims; i++)
@@ -1518,7 +1519,7 @@ void OnnxParser::ParseAdd(const onnx::NodeProto& node)
 
 
      IConnectableLayer* layer = m_Network->AddAdditionLayer(node.name().c_str());
-     BOOST_ASSERT(layer != nullptr);
+     ARMNN_ASSERT(layer != nullptr);
 
      auto outputInfo = ComputeOutputInfo({ node.output(0) }, layer,
                                          { m_TensorsInfo[inputs.first].m_info->GetShape(),
@@ -1574,7 +1575,7 @@ void OnnxParser::ParseBatchNormalization(const onnx::NodeProto& node)
                                                                      biasTensor.first,
                                                                      scaleTensor.first,
                                                                      node.name().c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     auto outputInfo = ComputeOutputInfo({node.output(0)}, layer, {m_TensorsInfo[node.input(0)].m_info->GetShape()});
     layer->GetOutputSlot(0).SetTensorInfo(outputInfo[0]);
@@ -1623,7 +1624,7 @@ void OnnxParser::SetupOutputLayers()
 
 void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     if (tensorIds.size() != layer->GetNumInputSlots())
     {
         throw ParseException(
@@ -1650,7 +1651,7 @@ void OnnxParser::RegisterInputSlots(IConnectableLayer* layer, const std::vector<
 
 void OnnxParser::RegisterOutputSlots(IConnectableLayer* layer, const std::vector<std::string>& tensorIds)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     if (tensorIds.size() != layer->GetNumOutputSlots())
     {
         throw ParseException(
index a0c673a..cbb10d7 100644 (file)
@@ -9,6 +9,7 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Logging.hpp>
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 // armnnUtils:
@@ -22,7 +23,6 @@
 
 #include <flatbuffers/flexbuffers.h>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/filesystem.hpp>
@@ -131,11 +131,11 @@ void CheckTensor(const TfLiteParser::ModelPtr & model,
 {
     // not checking model, because I assume CHECK_MODEL already run
     // and checked that. An assert would do.
-    BOOST_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
+    ARMNN_ASSERT_MSG(model.get() != nullptr, "Expecting a valid model in this function");
 
     // also subgraph index should be checked by CHECK_MODEL so
     // I only add an assert here
-    BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
+    ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(), "Expecting a valid subgraph index");
 
     // the tensor index is the only one to check here
     if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
@@ -435,8 +435,8 @@ CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
                       armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
     IgnoreUnused(tensorPtr);
-    BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
-    BOOST_ASSERT_MSG(bufferPtr != nullptr,
+    ARMNN_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
+    ARMNN_ASSERT_MSG(bufferPtr != nullptr,
         boost::str(
             boost::format("Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
 
@@ -543,12 +543,12 @@ void TfLiteParser::AddBroadcastReshapeLayer(size_t subgraphIndex,
                                             IConnectableLayer *layer)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
     const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
 
-    BOOST_ASSERT(operatorPtr->inputs.size() > 1);
+    ARMNN_ASSERT(operatorPtr->inputs.size() > 1);
 
     uint32_t reshapedInputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[0]);
     TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
@@ -612,7 +612,7 @@ INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & b
 INetworkPtr TfLiteParser::CreateNetworkFromModel()
 {
     m_Network = INetwork::Create();
-    BOOST_ASSERT(m_Model.get() != nullptr);
+    ARMNN_ASSERT(m_Model.get() != nullptr);
 
     bool failedToCreate = false;
     std::stringstream errors;
@@ -710,8 +710,8 @@ void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
                                             armnn::IOutputSlot* slot)
 {
     CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
-    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
-    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
 
     TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
 
@@ -734,8 +734,8 @@ void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
                                             armnn::IInputSlot* slot)
 {
     CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
-    BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
-    BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
+    ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
+    ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
 
     TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
     tensorSlots.inputSlots.push_back(slot);
@@ -878,7 +878,7 @@ void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
                                                  layerName.c_str());
     }
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -965,7 +965,7 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
                                                           EmptyOptional(),
                                                           layerName.c_str());
     }
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -994,7 +994,7 @@ void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1035,7 +1035,7 @@ void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 
     layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1104,7 +1104,7 @@ void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex
                                                       EmptyOptional(),
                                                       layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1185,7 +1185,7 @@ void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorInd
     auto layerName = boost::str(boost::format("L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
     IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1292,7 +1292,7 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
                 boost::str(boost::format("MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
             break;
         default:
-            BOOST_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
+            ARMNN_ASSERT_MSG(false, "Unsupported Pooling Algorithm");
     }
 
     Pooling2dDescriptor desc;
@@ -1324,7 +1324,7 @@ void TfLiteParser::ParsePool(size_t subgraphIndex,
 
     IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -1798,7 +1798,7 @@ void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = boost::str(boost::format("Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
 
     IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2125,7 +2125,7 @@ void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex
     auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
     IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -2198,7 +2198,7 @@ void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorInde
                                                   EmptyOptional(),
                                                   layerName.c_str());
     }
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
 
@@ -2305,7 +2305,7 @@ void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operat
     IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
                                                                        layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     // The model does not specify the output shapes.
     // The output shapes are calculated from the max_detection and max_classes_per_detection.
@@ -2362,7 +2362,7 @@ void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
     auto layerName = boost::str(boost::format("Pack:%1%:%2%") % subgraphIndex % operatorIndex);
     IConnectableLayer* layer = m_Network->AddStackLayer(desc, layerName.c_str());
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
@@ -2504,7 +2504,7 @@ void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
     std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
     ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
 
-    BOOST_ASSERT(axisTensorInfo.GetNumElements() == 1);
+    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
     const unsigned int splitDim = axisData[0];
 
     auto inputDimSize = inputTensorInfo.GetNumDimensions();
@@ -2764,7 +2764,7 @@ void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
                                       const std::vector<unsigned int>& tensorIndexes)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     if (tensorIndexes.size() != layer->GetNumInputSlots())
     {
         throw ParseException(
@@ -2791,7 +2791,7 @@ void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
                                        const std::vector<unsigned int>& tensorIndexes)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
     if (tensorIndexes.size() != layer->GetNumOutputSlots())
     {
         throw ParseException(
index 797e11e..56811b5 100644 (file)
@@ -11,6 +11,7 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/TypesUtils.hpp>
 #include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <armnnTfLiteParser/ITfLiteParser.hpp>
 
@@ -19,7 +20,6 @@
 #include <test/TensorHelpers.hpp>
 
 #include <boost/filesystem.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 #include "flatbuffers/idl.h"
@@ -107,10 +107,10 @@ struct ParserFlatbuffersFixture
         flatbuffers::Parser parser;
 
         bool ok = parser.Parse(schemafile.c_str());
-        BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+        ARMNN_ASSERT_MSG(ok, "Failed to parse schema file");
 
         ok &= parser.Parse(m_JsonString.c_str());
-        BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+        ARMNN_ASSERT_MSG(ok, "Failed to parse json input");
 
         if (!ok)
         {
index 39dee67..21392ac 100644 (file)
@@ -7,10 +7,10 @@
 #include "../TfLiteParser.hpp"
 
 #include <armnn/LayerVisitorBase.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <layers/StandInLayer.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/polymorphic_cast.hpp>
 #include <boost/test/unit_test.hpp>
 
@@ -78,10 +78,10 @@ public:
         , m_StandInLayerVerifier(inputInfos, outputInfos)
     {
         const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
-        BOOST_ASSERT(numInputs > 0);
+        ARMNN_ASSERT(numInputs > 0);
 
         const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
-        BOOST_ASSERT(numOutputs > 0);
+        ARMNN_ASSERT(numOutputs > 0);
 
         m_JsonString = R"(
             {
index 793bd0e..491a964 100755 (executable)
@@ -468,7 +468,7 @@ public:
 
     IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
     {
-        BOOST_ASSERT(m_Layer);
+        ARMNN_ASSERT(m_Layer);
         // Assumes one-to-one mapping between Tf and armnn output slots.
         unsigned int armnnOutputSlotIdx = tfOutputIndex;
         if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
@@ -858,7 +858,7 @@ public:
 
     virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
     {
-        BOOST_ASSERT(m_Representative);
+        ARMNN_ASSERT(m_Representative);
         return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
     }
 
@@ -892,12 +892,12 @@ public:
         m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
         m_TensorInfo(tensorInfo)
     {
-        BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
+        ARMNN_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
     }
 
     void CreateLayerDeferred() override
     {
-        BOOST_ASSERT(m_Layer == nullptr);
+        ARMNN_ASSERT(m_Layer == nullptr);
         m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
         m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
     }
@@ -1068,7 +1068,7 @@ struct InvokeParseFunction
 ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
-    BOOST_ASSERT(nodeDef.op() == "Const");
+    ARMNN_ASSERT(nodeDef.op() == "Const");
 
     if (nodeDef.attr().count("value") == 0)
     {
@@ -1467,7 +1467,7 @@ ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& n
 
 TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
 {
-    BOOST_ASSERT(nodeDef.op() == "ExpandDims");
+    ARMNN_ASSERT(nodeDef.op() == "ExpandDims");
 
     if (inputTensorInfo.GetNumDimensions() > 4) {
         throw ParseException(
@@ -1679,10 +1679,10 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
         size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
         std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
 
-        BOOST_ASSERT(inputs.size() == 2);
-        BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
-        BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
-        BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
+        ARMNN_ASSERT(inputs.size() == 2);
+        ARMNN_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
+        ARMNN_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
+        ARMNN_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
 
         if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
         {
@@ -1744,7 +1744,7 @@ ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
         IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
         IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
     {
-        BOOST_ASSERT(outputOfLeakyRelu != nullptr);
+        ARMNN_ASSERT(outputOfLeakyRelu != nullptr);
 
         IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
         outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
@@ -2091,7 +2091,7 @@ ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef
     const auto desc              = TransposeDescriptor(permutationVector);
 
     auto* layer = m_Network->AddTransposeLayer(desc, nodeDef.name().c_str());
-    BOOST_ASSERT(layer);
+    ARMNN_ASSERT(layer);
 
     input0Slot->Connect(layer->GetInputSlot(0));
 
@@ -2462,7 +2462,7 @@ ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& no
 
 TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
 {
-    BOOST_ASSERT(nodeDef.op() == "Squeeze");
+    ARMNN_ASSERT(nodeDef.op() == "Squeeze");
     tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
 
     DataType type;
@@ -2598,7 +2598,7 @@ public:
 
     void CreateLayerDeferred() override
     {
-        BOOST_ASSERT(m_Layer == nullptr);
+        ARMNN_ASSERT(m_Layer == nullptr);
         m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
     }
 };
@@ -2681,7 +2681,7 @@ public:
 
     void CreateLayerDeferred() override
     {
-        BOOST_ASSERT(m_Layer == nullptr);
+        ARMNN_ASSERT(m_Layer == nullptr);
         m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
     }
 };
@@ -3393,7 +3393,7 @@ IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& m
     }
     layer = m_Network->AddFullyConnectedLayer(desc, weights, optionalBiases, armnnLayerName);
 
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
     unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
index 19affa8..16b1124 100644 (file)
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include "armnnTfParser/ITfParser.hpp"
@@ -19,7 +19,7 @@ struct AddNFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITf
 {
     AddNFixture(const std::vector<armnn::TensorShape> inputShapes, unsigned int numberOfInputs)
     {
-        BOOST_ASSERT(inputShapes.size() == numberOfInputs);
+        ARMNN_ASSERT(inputShapes.size() == numberOfInputs);
         m_Prototext = "";
         for (unsigned int i = 0; i < numberOfInputs; i++)
         {
index aead1fe..cf71489 100644 (file)
@@ -152,7 +152,7 @@ struct Convolution2dFixture : public armnnUtils::ParserPrototxtFixture<armnnTfPa
                            "} \n");
 
         // Manual height computation based on stride parameter.
-        BOOST_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
+        ARMNN_ASSERT_MSG(stride == 1 || stride == 2, "Add support for strides other than 1 or 2.");
         std::array<unsigned int, 4> dims;
         if (dataLayout == "NHWC")
         {
index 7416ff6..80043a9 100644 (file)
@@ -5,7 +5,6 @@
 
 #include "DotSerializer.hpp"
 
-#include <boost/assert.hpp>
 #include <boost/algorithm/string/replace.hpp>
 #include <sstream>
 #include <cstring>
index 2216824..5d89a25 100644 (file)
@@ -8,7 +8,7 @@
 #include "BFloat16.hpp"
 #include "Half.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnnUtils
 {
@@ -17,8 +17,8 @@ void FloatingPointConverter::ConvertFloat32To16(const float* srcFloat32Buffer,
                                                 size_t numElements,
                                                 void* dstFloat16Buffer)
 {
-    BOOST_ASSERT(srcFloat32Buffer != nullptr);
-    BOOST_ASSERT(dstFloat16Buffer != nullptr);
+    ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+    ARMNN_ASSERT(dstFloat16Buffer != nullptr);
 
     armnn::Half* pHalf = reinterpret_cast<armnn::Half*>(dstFloat16Buffer);
 
@@ -32,8 +32,8 @@ void FloatingPointConverter::ConvertFloat16To32(const void* srcFloat16Buffer,
                                                 size_t numElements,
                                                 float* dstFloat32Buffer)
 {
-    BOOST_ASSERT(srcFloat16Buffer != nullptr);
-    BOOST_ASSERT(dstFloat32Buffer != nullptr);
+    ARMNN_ASSERT(srcFloat16Buffer != nullptr);
+    ARMNN_ASSERT(dstFloat32Buffer != nullptr);
 
     const armnn::Half* pHalf = reinterpret_cast<const armnn::Half*>(srcFloat16Buffer);
 
@@ -47,8 +47,8 @@ void FloatingPointConverter::ConvertFloat32ToBFloat16(const float* srcFloat32Buf
                                                       size_t numElements,
                                                       void* dstBFloat16Buffer)
 {
-    BOOST_ASSERT(srcFloat32Buffer != nullptr);
-    BOOST_ASSERT(dstBFloat16Buffer != nullptr);
+    ARMNN_ASSERT(srcFloat32Buffer != nullptr);
+    ARMNN_ASSERT(dstBFloat16Buffer != nullptr);
 
     armnn::BFloat16* bf16 = reinterpret_cast<armnn::BFloat16*>(dstBFloat16Buffer);
 
@@ -62,8 +62,8 @@ void FloatingPointConverter::ConvertBFloat16ToFloat32(const void* srcBFloat16Buf
                                                       size_t numElements,
                                                       float* dstFloat32Buffer)
 {
-    BOOST_ASSERT(srcBFloat16Buffer != nullptr);
-    BOOST_ASSERT(dstFloat32Buffer != nullptr);
+    ARMNN_ASSERT(srcBFloat16Buffer != nullptr);
+    ARMNN_ASSERT(dstFloat32Buffer != nullptr);
 
     const armnn::BFloat16* bf16 = reinterpret_cast<const armnn::BFloat16*>(srcBFloat16Buffer);
 
index 1131459..f3c4b19 100644 (file)
@@ -5,7 +5,6 @@
 #pragma once
 
 #include <armnn/Optional.hpp>
-#include <boost/assert.hpp>
 
 #include <functional>
 #include <map>
index 818cb17..d197dc8 100644 (file)
@@ -64,7 +64,7 @@ std::vector<std::string>
 // Remove any preceding and trailing character specified in the characterSet.
 std::string Strip(const std::string& originalString, const std::string& characterSet)
 {
-    BOOST_ASSERT(!characterSet.empty());
+    ARMNN_ASSERT(!characterSet.empty());
     const std::size_t firstFound = originalString.find_first_not_of(characterSet);
     const std::size_t lastFound  = originalString.find_last_not_of(characterSet);
     // Return empty if the originalString is empty or the originalString contains only to-be-striped characters
index c4dd4f1..6595a52 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <algorithm>
 #include <armnn/Types.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <boost/variant/apply_visitor.hpp>
 #include <cstddef>
 #include <functional>
index 535d68a..952c768 100644 (file)
@@ -6,8 +6,8 @@
 #include <armnnUtils/TensorUtils.hpp>
 
 #include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/utility/Assert.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
@@ -114,8 +114,8 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
                                    const unsigned int firstAxisInclusive,
                                    const unsigned int lastAxisExclusive)
 {
-    BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive);
-    BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
+    ARMNN_ASSERT(firstAxisInclusive <= lastAxisExclusive);
+    ARMNN_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
     unsigned int count = 1;
     for (unsigned int i = firstAxisInclusive; i < lastAxisExclusive; i++)
     {
@@ -126,9 +126,9 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
 
 unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
 {
-    BOOST_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
+    ARMNN_ASSERT_MSG(axis < boost::numeric_cast<int>(inputDimension),
                      "Required axis index greater than number of dimensions.");
-    BOOST_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
+    ARMNN_ASSERT_MSG(axis >= -boost::numeric_cast<int>(inputDimension),
                      "Required axis index lower than negative of the number of dimensions");
 
     unsigned int uAxis = axis < 0  ?
@@ -140,7 +140,7 @@ unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
 unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
 {
     unsigned int numDim = shape.GetNumDimensions();
-    BOOST_ASSERT(axis <= numDim - 1);
+    ARMNN_ASSERT(axis <= numDim - 1);
     unsigned int count = 1;
     for (unsigned int i = axis; i < numDim; i++)
     {
index dc37450..dbf0673 100644 (file)
@@ -29,8 +29,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
 
-    BOOST_ASSERT(outputTensorInfo1.GetNumDimensions() == 1);
-    BOOST_ASSERT(outputTensorInfo1.GetShape()[0] == 1);
+    BOOST_TEST(outputTensorInfo1.GetNumDimensions() == 1);
+    BOOST_TEST(outputTensorInfo1.GetShape()[0] == 1);
 
     // Reducing dimension 0 results in a 3x4 size tensor (one dimension)
     std::set<unsigned int> axisData2 = { 0 };
@@ -38,8 +38,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
 
-    BOOST_ASSERT(outputTensorInfo2.GetNumDimensions() == 1);
-    BOOST_ASSERT(outputTensorInfo2.GetShape()[0] == 12);
+    BOOST_TEST(outputTensorInfo2.GetNumDimensions() == 1);
+    BOOST_TEST(outputTensorInfo2.GetShape()[0] == 12);
 
     // Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
     std::set<unsigned int> axisData3 = { 0, 1 };
@@ -47,8 +47,8 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
 
-    BOOST_ASSERT(outputTensorInfo3.GetNumDimensions() == 1);
-    BOOST_ASSERT(outputTensorInfo3.GetShape()[0] == 4);
+    BOOST_TEST(outputTensorInfo3.GetNumDimensions() == 1);
+    BOOST_TEST(outputTensorInfo3.GetShape()[0] == 4);
 
     // Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
     keepDims = true;
@@ -58,10 +58,10 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
 
-    BOOST_ASSERT(outputTensorInfo4.GetNumDimensions() == 3);
-    BOOST_ASSERT(outputTensorInfo4.GetShape()[0] == 1);
-    BOOST_ASSERT(outputTensorInfo4.GetShape()[1] == 3);
-    BOOST_ASSERT(outputTensorInfo4.GetShape()[2] == 4);
+    BOOST_TEST(outputTensorInfo4.GetNumDimensions() == 3);
+    BOOST_TEST(outputTensorInfo4.GetShape()[0] == 1);
+    BOOST_TEST(outputTensorInfo4.GetShape()[1] == 3);
+    BOOST_TEST(outputTensorInfo4.GetShape()[2] == 4);
 
     // Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
     keepDims = true;
@@ -71,10 +71,10 @@ BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5,  keepDims, outputTensorInfo5);
 
-    BOOST_ASSERT(outputTensorInfo5.GetNumDimensions() == 3);
-    BOOST_ASSERT(outputTensorInfo5.GetShape()[0] == 2);
-    BOOST_ASSERT(outputTensorInfo5.GetShape()[1] == 1);
-    BOOST_ASSERT(outputTensorInfo5.GetShape()[2] == 1);
+    BOOST_TEST(outputTensorInfo5.GetNumDimensions() == 3);
+    BOOST_TEST(outputTensorInfo5.GetShape()[0] == 2);
+    BOOST_TEST(outputTensorInfo5.GetShape()[1] == 1);
+    BOOST_TEST(outputTensorInfo5.GetShape()[2] == 1);
 
 }
 
index f263a52..d51c801 100644 (file)
@@ -15,28 +15,28 @@ BOOST_AUTO_TEST_CASE(ConvertInt32ToOctalStringTest)
     using armnnUtils::ConvertInt32ToOctalString;
 
     std::string octalString = ConvertInt32ToOctalString(1);
-    BOOST_ASSERT(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
+    BOOST_TEST(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
 
     octalString = ConvertInt32ToOctalString(256);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
 
     octalString = ConvertInt32ToOctalString(65536);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
 
     octalString = ConvertInt32ToOctalString(16777216);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
 
     octalString = ConvertInt32ToOctalString(-1);
-    BOOST_ASSERT(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
+    BOOST_TEST(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-256);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-65536);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-16777216);
-    BOOST_ASSERT(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
+    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
 }
 
 BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
@@ -51,13 +51,13 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
     };
 
     auto output_string = createAndConvert({5});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
         "size: 5\n"
         "}"));
 
     output_string = createAndConvert({4, 5});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 4\n"
         "}\n"
@@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
         ));
 
     output_string = createAndConvert({3, 4, 5});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 3\n"
         "}\n"
@@ -80,7 +80,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
         ));
 
     output_string = createAndConvert({2, 3, 4, 5});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 2\n"
         "}\n"
@@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
         ));
 
     output_string = createAndConvert({1, 2, 3, 4, 5});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 1\n"
         "}\n"
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
         ));
 
     output_string = createAndConvert({0xffffffff, 0xffffffff});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 4294967295\n"
         "}\n"
@@ -125,7 +125,7 @@ BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
         ));
 
     output_string = createAndConvert({1, 0});
-    BOOST_ASSERT(output_string.compare(
+    BOOST_TEST(output_string.compare(
         "dim {\n"
             "size: 1\n"
         "}\n"
index f5a9e05..7a75f9c 100644 (file)
@@ -42,7 +42,7 @@ arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multi
         case armnn::DataType::Signed32:
             return arm_compute::DataType::S32;
         default:
-            BOOST_ASSERT_MSG(false, "Unknown data type");
+            ARMNN_ASSERT_MSG(false, "Unknown data type");
             return arm_compute::DataType::UNKNOWN;
     }
 }
index 9c6f464..80bb762 100644 (file)
@@ -6,11 +6,10 @@
 
 #include <armnn/Descriptors.hpp>
 #include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <arm_compute/core/Types.h>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
@@ -161,7 +160,7 @@ inline unsigned int ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc,
 
    unsigned int dim = tensor.GetNumDimensions();
 
-    BOOST_ASSERT(dim != 0);
+    ARMNN_ASSERT(dim != 0);
 
     // Currently ArmNN support axis 1.
     return dim - 1;
index 844fbcd..b43eaf8 100644 (file)
@@ -19,7 +19,7 @@ namespace armnn
 BaseMemoryManager::BaseMemoryManager(std::unique_ptr<arm_compute::IAllocator> alloc,
                                      MemoryAffinity memoryAffinity)
 {
-    BOOST_ASSERT(alloc);
+    ARMNN_ASSERT(alloc);
     m_Allocator = std::move(alloc);
 
     m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
@@ -51,30 +51,30 @@ void BaseMemoryManager::Acquire()
     static const size_t s_NumPools = 1;
 
     // Allocate memory pools for intra-layer memory manager
-    BOOST_ASSERT(m_IntraLayerMemoryMgr);
+    ARMNN_ASSERT(m_IntraLayerMemoryMgr);
     m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
 
     // Allocate memory pools for inter-layer memory manager
-    BOOST_ASSERT(m_InterLayerMemoryMgr);
+    ARMNN_ASSERT(m_InterLayerMemoryMgr);
     m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
 
     // Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
-    BOOST_ASSERT(m_InterLayerMemoryGroup);
+    ARMNN_ASSERT(m_InterLayerMemoryGroup);
     m_InterLayerMemoryGroup->acquire();
 }
 
 void BaseMemoryManager::Release()
 {
     // Release inter-layer memory group. NOTE: This has to come before releasing the pools
-    BOOST_ASSERT(m_InterLayerMemoryGroup);
+    ARMNN_ASSERT(m_InterLayerMemoryGroup);
     m_InterLayerMemoryGroup->release();
 
     // Release memory pools managed by intra-layer memory manager
-    BOOST_ASSERT(m_IntraLayerMemoryMgr);
+    ARMNN_ASSERT(m_IntraLayerMemoryMgr);
     m_IntraLayerMemoryMgr->clear();
 
     // Release memory pools managed by inter-layer memory manager
-    BOOST_ASSERT(m_InterLayerMemoryMgr);
+    ARMNN_ASSERT(m_InterLayerMemoryMgr);
     m_InterLayerMemoryMgr->clear();
 }
 #else
index 65e6c47..7bcf59f 100644 (file)
@@ -118,8 +118,8 @@ void ScopedCpuTensorHandle::CopyFrom(const ScopedCpuTensorHandle& other)
 
 void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
 {
-    BOOST_ASSERT(GetTensor<void>() == nullptr);
-    BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
+    ARMNN_ASSERT(GetTensor<void>() == nullptr);
+    ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
 
     if (srcMemory)
     {
index e6e59fc..78efb08 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <algorithm>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -30,7 +30,7 @@ public:
     template <typename T>
     const T* GetConstTensor() const
     {
-        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+        ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<const T*>(m_Memory);
     }
 
@@ -59,8 +59,8 @@ protected:
 
 private:
     // Only used for testing
-    void CopyOutTo(void *) const override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
-    void CopyInFrom(const void*) override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
+    void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
+    void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
 
     ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
     ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
@@ -79,7 +79,7 @@ public:
     template <typename T>
     T* GetTensor() const
     {
-        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+        ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<T*>(m_MutableMemory);
     }
 
index 03bec53..ddecc82 100644 (file)
@@ -5,7 +5,7 @@
 
 #pragma once
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <algorithm>
 
 namespace armnn
@@ -30,7 +30,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
         case armnn::DataType::QAsymmS8:
             return armnn::DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
index 8abc8a6..5601822 100644 (file)
@@ -70,7 +70,7 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
         case DataType::QSymmS16:
             return nullptr;
         default:
-            BOOST_ASSERT_MSG(false, "Unknown DataType.");
+            ARMNN_ASSERT_MSG(false, "Unknown DataType.");
             return nullptr;
     }
 }
index 984443b..244b5f1 100644 (file)
@@ -65,9 +65,9 @@ public:
 
             if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end())
             {
-                BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
             }
-            BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
+            ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
                                          info.m_InputTensorInfos.end(),
                                          [&](auto it){
                                              return it.GetDataType() == expectedInputType;
@@ -84,14 +84,14 @@ public:
             {
                 if (expectedOutputType != expectedInputType)
                 {
-                    BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                    ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
                 }
             }
             else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end())
             {
-                BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
             }
-            BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
+            ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
                                          info.m_OutputTensorInfos.end(),
                                          [&](auto it){
                                              return it.GetDataType() == expectedOutputType;
@@ -109,14 +109,14 @@ public:
     MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<QueueDescriptor>(descriptor, info)
     {
-        BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
                                      info.m_InputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == InputDataType;
                                      }),
                          "Trying to create workload with incorrect type");
 
-        BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
                                      info.m_OutputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == OutputDataType;
@@ -136,11 +136,11 @@ public:
     {
         if (!info.m_InputTensorInfos.empty())
         {
-            BOOST_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
+            ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
                                  "Trying to create workload with incorrect type");
         }
 
-        BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
                                      info.m_OutputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == DataType;
index f968ad7..1f4a849 100644 (file)
@@ -40,7 +40,7 @@ DataType GetBiasDataType(DataType inputDataType)
         case DataType::QSymmS16:
             return DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "Invalid input data type");
+            ARMNN_ASSERT_MSG(false, "Invalid input data type");
             return DataType::Float32;
     }
 }
index 5628c36..a7e8576 100644 (file)
@@ -194,7 +194,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                        dataType);
             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             const Convolution2dDescriptor& descriptor  = cLayer->GetParameters();
 
@@ -244,7 +244,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                        dataType);
             const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
 
@@ -335,7 +335,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             TensorInfo biasInfo;
             const TensorInfo * biasInfoPtr = nullptr;
@@ -347,7 +347,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
             if (descriptor.m_BiasEnabled)
             {
-                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                 biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                 biasInfoPtr = &biasInfo;
             }
@@ -381,7 +381,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                     }
                     default:
                     {
-                        BOOST_ASSERT_MSG(false, "Unexpected bias type");
+                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                     }
                 }
             }
@@ -1156,12 +1156,12 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                           GetBiasTypeFromWeightsType(dataType));
             }
 
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
             const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
 
             result = layerSupportObject->IsTransposeConvolution2dSupported(input,
@@ -1175,7 +1175,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
+            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
             reason.value() = "Unrecognised layer type";
             result = false;
             break;
index 3b3959b..bd5e81e 100644 (file)
@@ -13,8 +13,8 @@ namespace armnn
 armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
                                  const PermutationVector& permutationVector, void* permuteBuffer)
 {
-    BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
-    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+    ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
+    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
 
     TensorInfo tensorInfo = tensor->GetTensorInfo();
 
@@ -133,8 +133,8 @@ armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle*
                                                      DataLayout dataLayout,
                                                      void* permuteBuffer)
 {
-    BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
-    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+    ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
+    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
 
     auto multiplier    = weightTensor->GetTensorInfo().GetShape()[0];
     auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];
index 66056db..a4da924 100644 (file)
@@ -168,8 +168,8 @@ void CopyTensorContentsGeneric(const ITensorHandle* srcTensor, ITensorHandle* ds
                 auto dstPtrChannel = dstData;
                 for (unsigned int w = 0; w < copyWidth; ++w)
                 {
-                    BOOST_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
-                    BOOST_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
+                    ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
+                    ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
                     copy(dstData, srcData, copyLength);
                     dstData += dstWidthStride;
                     srcData += srcWidthStride;
index 116bf77..abdaa81 100644 (file)
@@ -23,7 +23,7 @@ namespace
 
 bool IsLayerSupported(const armnn::Layer* layer)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::LayerType layerType = layer->GetType();
     switch (layerType)
@@ -47,7 +47,7 @@ bool IsLayerSupported(const armnn::Layer& layer)
 
 bool IsLayerOptimizable(const armnn::Layer* layer)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     // A Layer is not optimizable if its name contains "unoptimizable"
     const std::string layerName(layer->GetName());
@@ -191,7 +191,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
                       supportedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
         {
-            BOOST_ASSERT(supportedSubgraph != nullptr);
+            ARMNN_ASSERT(supportedSubgraph != nullptr);
 
             PreCompiledLayer* preCompiledLayer =
                 optimizationViews.GetGraph().AddLayer<PreCompiledLayer>(
@@ -228,7 +228,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
                       unsupportedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
         {
-            BOOST_ASSERT(unsupportedSubgraph != nullptr);
+            ARMNN_ASSERT(unsupportedSubgraph != nullptr);
 
             optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
         });
@@ -256,7 +256,7 @@ OptimizationViews MockBackend::OptimizeSubgraphView(const SubgraphView& subgraph
                       untouchedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
         {
-            BOOST_ASSERT(untouchedSubgraph != nullptr);
+            ARMNN_ASSERT(untouchedSubgraph != nullptr);
 
             optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
         });
index df001b7..9f38e47 100644 (file)
@@ -106,7 +106,7 @@ inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Option
         case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
index 319434e..a82048c 100644 (file)
@@ -1212,9 +1212,9 @@ LayerTestResult<T,4> CompareActivationTestImpl(
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
-    BOOST_ASSERT(workload != nullptr);
+    ARMNN_ASSERT(workload != nullptr);
     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
-    BOOST_ASSERT(workloadRef != nullptr);
+    ARMNN_ASSERT(workloadRef != nullptr);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
index 2156b0e..a6b703b 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "ComparisonTestImpl.hpp"
 
-
+#include <armnn/utility/Assert.hpp>
 #include <Half.hpp>
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
@@ -18,8 +18,6 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/assert.hpp>
-
 namespace
 {
 
@@ -44,13 +42,13 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
     int outQuantOffset)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
-    BOOST_ASSERT(shape1.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
 
-    BOOST_ASSERT(outShape.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
 
     auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
index 1e40b42..9e08e30 100644 (file)
@@ -61,7 +61,7 @@ bool NeedPermuteForConcat(
         }
         else
         {
-            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+            ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                 "Input shapes must have the same number of dimensions");
         }
     }
@@ -92,7 +92,7 @@ void Generate3dPermuteVectorForConcat(
     unsigned int & concatDim,
     std::pair<PermutationVector, PermutationVector> & permutations)
 {
-    BOOST_ASSERT_MSG(numDimensions <= 3,
+    ARMNN_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
     unsigned int expandedBy = 3 - numDimensions;
     unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@ void Generate3dPermuteVectorForConcat(
     }
     else
     {
-        BOOST_ASSERT(expandedConcatAxis == 0);
+        ARMNN_ASSERT(expandedConcatAxis == 0);
         concatDim = 0;
     }
 }
@@ -127,7 +127,7 @@ template<typename T> void PermuteTensorData(
     std::vector<T>& outputData)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the concatenation
@@ -179,7 +179,7 @@ template<typename T> void PermuteInputsForConcat(
     TensorInfo & outputTensorInfo)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+    ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
@@ -200,12 +200,12 @@ template<typename T> void PermuteInputsForConcat(
 
             // Store the reverese permutation.
             permuteVector = permutations.second;
-            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+            ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
                 "Test logic error, we don't need permutation, so we shouldn't arrive here");
         }
         else
         {
-            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+            ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                 "All inputs must have the same number of dimensions");
         }
 
@@ -244,7 +244,7 @@ template <typename T> void PermuteOutputForConcat(
     std::unique_ptr<ITensorHandle> && inputDataHandle,
     T * data)
 {
-    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+    ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
     if (data == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
@@ -279,7 +279,7 @@ template<typename T> void Concatenate(
     unsigned int concatDim,
     bool useSubtensor)
 {
-    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+    ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
     if (output == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
index 50ad667..c66027e 100644 (file)
@@ -169,9 +169,9 @@ template<typename T, typename B>
 void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
     const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
 {
-    BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
                      "Invalid type and parameter combination.");
-    BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
                      "Invalid type and parameter combination.");
 
     // Note we need to dequantize and re-quantize the image value and the bias.
@@ -183,7 +183,7 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
             for (uint32_t x = 0; x < w; ++x)
             {
                 uint32_t offset = (i * h + y) * w + x;
-                BOOST_ASSERT(offset < v.size());
+                ARMNN_ASSERT(offset < v.size());
                 T& outRef = v[offset];
                 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
                 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -236,11 +236,11 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    BOOST_ASSERT(inputNum == 1);
-    BOOST_ASSERT(outputNum == 1);
+    ARMNN_ASSERT(inputNum == 1);
+    ARMNN_ASSERT(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
 
     // Note these tensors will use two (identical) batches.
@@ -1627,7 +1627,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
 
     // If a bias is used, its size must equal the number of output channels.
     bool biasEnabled = bias.size() > 0;
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo =
@@ -2135,11 +2135,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    BOOST_ASSERT(inputNum == 1);
-    BOOST_ASSERT(outputNum == 1);
+    ARMNN_ASSERT(inputNum == 1);
+    ARMNN_ASSERT(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
 
     // Note these tensors will use two (identical) batches.
index c277d2d..c64fc88 100644 (file)
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <boost/multi_array.hpp>
 
@@ -14,7 +15,7 @@
 template <std::size_t n>
 boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
 {
-    BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+    ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
         "Attempting to construct a shape array of mismatching size");
 
     boost::array<unsigned int, n> shape;
index 772ae2c..953b543 100644 (file)
@@ -104,7 +104,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
     outputHandle->Allocate();
     CopyDataToITensorHandle(inputHandle.get(), input.origin());
 
-    BOOST_ASSERT(workload);
+    ARMNN_ASSERT(workload);
 
     ExecuteWorkload(*workload, memoryManager);
 
index 068e295..f612c37 100644 (file)
@@ -7,6 +7,7 @@
 #include "ClContextControl.hpp"
 
 #include <armnn/Logging.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <arm_compute/core/CL/OpenCL.h>
 #include <arm_compute/core/CL/CLKernelLibrary.h>
@@ -184,7 +185,7 @@ ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
                             return TuningLevel::Exhaustive;
                         default:
                         {
-                            BOOST_ASSERT_MSG(false, "Tuning level not recognised.");
+                            ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
                             return TuningLevel::None;
                         }
                     }
index f307133..dbcccce 100644 (file)
@@ -9,12 +9,12 @@
 
 #include <LeakChecking.hpp>
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/polymorphic_cast.hpp>
 
@@ -59,11 +59,11 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
 
     // Removes the use of global CL context.
     cl::Context::setDefault(cl::Context{});
-    BOOST_ASSERT(cl::Context::getDefault()() == NULL);
+    ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
 
     // Removes the use of global CL command queue.
     cl::CommandQueue::setDefault(cl::CommandQueue{});
-    BOOST_ASSERT(cl::CommandQueue::getDefault()() == NULL);
+    ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
 
     // Always load the OpenCL runtime.
     LoadOpenClRuntime();
index 39ae14e..e928870 100644 (file)
@@ -33,7 +33,7 @@ void ClConstantWorkload::Execute() const
     {
         const ConstantQueueDescriptor& data = this->m_Data;
 
-        BOOST_ASSERT(data.m_LayerOutput != nullptr);
+        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
         arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
 
@@ -56,7 +56,7 @@ void ClConstantWorkload::Execute() const
             }
             default:
             {
-                BOOST_ASSERT_MSG(false, "Unknown data type");
+                ARMNN_ASSERT_MSG(false, "Unknown data type");
                 break;
             }
         }
index e8af0ee..73ec95c 100644 (file)
@@ -38,7 +38,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
index 858eab4..8704b12 100644 (file)
@@ -45,7 +45,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
@@ -125,7 +125,7 @@ ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
         arm_compute::ActivationLayerInfo(),
         aclDilationInfo);
 
-    BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
 
     ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
     InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -148,7 +148,7 @@ void ClDepthwiseConvolutionWorkload::FreeUnusedTensors()
 void ClDepthwiseConvolutionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
-    BOOST_ASSERT(m_DepthwiseConvolutionLayer);
+    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
 
     RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
 }
index 7c07366..20b2104 100644 (file)
@@ -38,7 +38,7 @@ arm_compute::Status ClTransposeConvolution2dWorkloadValidate(const TensorInfo& i
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
index b4bcc1c..54e7717 100644 (file)
@@ -90,7 +90,7 @@ inline auto SetClSliceData(const std::vector<unsigned int>& m_begin,
 inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                              const ConstCpuTensorHandle* handle)
 {
-    BOOST_ASSERT(handle);
+    ARMNN_ASSERT(handle);
 
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
     switch(handle->GetTensorInfo().GetDataType())
@@ -116,7 +116,7 @@ inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
             CopyArmComputeClTensorData(clTensor, handle->GetConstTensor<int32_t>());
             break;
         default:
-            BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+            ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
     }
 };
 
index d8dd01b..745c5fd 100644 (file)
@@ -5,8 +5,6 @@
 
 #include "NeonInterceptorScheduler.hpp"
 
-#include <boost/assert.hpp>
-
 namespace armnn{
 
 NeonInterceptorScheduler::NeonInterceptorScheduler(arm_compute::IScheduler &realScheduler)
index 11d2087..fb2c2b5 100644 (file)
@@ -7,6 +7,8 @@
 #include <BFloat16.hpp>
 #include <Half.hpp>
 
+#include <armnn/utility/Assert.hpp>
+
 #include <aclCommon/ArmComputeTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
@@ -61,7 +63,7 @@ public:
         // If we have enabled Importing, don't manage the tensor
         if (!m_IsImportEnabled)
         {
-            BOOST_ASSERT(m_MemoryGroup != nullptr);
+            ARMNN_ASSERT(m_MemoryGroup != nullptr);
             m_MemoryGroup->manage(&m_Tensor);
         }
     }
index 219edc9..1079a0d 100644 (file)
@@ -6,9 +6,10 @@
 #include "NeonTimer.hpp"
 #include "NeonInterceptorScheduler.hpp"
 
+#include <armnn/utility/Assert.hpp>
+
 #include <memory>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 namespace armnn
@@ -21,7 +22,7 @@ static thread_local auto g_Interceptor = std::make_shared<NeonInterceptorSchedul
 void NeonTimer::Start()
 {
     m_Kernels.clear();
-    BOOST_ASSERT(g_Interceptor->GetKernels() == nullptr);
+    ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
     g_Interceptor->SetKernels(&m_Kernels);
 
     m_RealSchedulerType = arm_compute::Scheduler::get_type();
index 83a2692..b9cb807 100644 (file)
@@ -39,7 +39,7 @@ void NeonConstantWorkload::Execute() const
     {
         const ConstantQueueDescriptor& data = this->m_Data;
 
-        BOOST_ASSERT(data.m_LayerOutput != nullptr);
+        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
         arm_compute::ITensor& output =
             boost::polymorphic_downcast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType =
@@ -69,7 +69,7 @@ void NeonConstantWorkload::Execute() const
             }
             default:
             {
-                BOOST_ASSERT_MSG(false, "Unknown data type");
+                ARMNN_ASSERT_MSG(false, "Unknown data type");
                 break;
             }
         }
index 683decd..5d45642 100644 (file)
@@ -37,7 +37,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
@@ -97,7 +97,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
 
-    BOOST_ASSERT(m_ConvolutionLayer);
+    ARMNN_ASSERT(m_ConvolutionLayer);
 
     InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
index e39fe54..a9a3c75 100644 (file)
@@ -49,7 +49,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
@@ -127,7 +127,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
                                                         arm_compute::ActivationLayerInfo(),
                                                         aclDilationInfo);
 
-    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+    ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
 
     ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
     InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
@@ -144,7 +144,7 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
 void NeonDepthwiseConvolutionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
-    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
+    ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
 
     m_pDepthwiseConvolutionLayer->run();
 }
index c62f719..ffca207 100644 (file)
@@ -38,7 +38,7 @@ arm_compute::Status NeonTransposeConvolution2dWorkloadValidate(const TensorInfo&
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.has_value());
+        ARMNN_ASSERT(biases.has_value());
 
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
@@ -81,7 +81,7 @@ NeonTransposeConvolution2dWorkload::NeonTransposeConvolution2dWorkload(
     m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
     m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
 
-    BOOST_ASSERT(m_Layer);
+    ARMNN_ASSERT(m_Layer);
 
     InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
index 3f0fe84..c3c9d3d 100644 (file)
@@ -35,7 +35,7 @@ void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                            const ConstCpuTensorHandle* handle)
 {
-    BOOST_ASSERT(handle);
+    ARMNN_ASSERT(handle);
 
     switch(handle->GetTensorInfo().GetDataType())
     {
@@ -59,7 +59,7 @@ inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
             break;
         default:
-            BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+            ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
     }
 };
 
index 607c86b..25d639a 100644 (file)
@@ -348,7 +348,7 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
                                   "Reference concatenation: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        BOOST_ASSERT(input != nullptr);
+        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference concatenation: input type not supported");
 
@@ -1864,7 +1864,7 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
                                   "Reference stack: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        BOOST_ASSERT(input != nullptr);
+        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference stack: input type not supported");
 
index 4f15e39..76054e4 100644 (file)
@@ -4,7 +4,7 @@
 //
 #include "RefMemoryManager.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <algorithm>
 
@@ -35,7 +35,7 @@ RefMemoryManager::Pool* RefMemoryManager::Manage(unsigned int numBytes)
 
 void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
 {
-    BOOST_ASSERT(pool);
+    ARMNN_ASSERT(pool);
     m_FreePools.push_back(pool);
 }
 
@@ -75,25 +75,25 @@ RefMemoryManager::Pool::~Pool()
 
 void* RefMemoryManager::Pool::GetPointer()
 {
-    BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
     return m_Pointer;
 }
 
 void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
 {
-    BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
     m_Size = std::max(m_Size, numBytes);
 }
 
 void RefMemoryManager::Pool::Acquire()
 {
-    BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
     m_Pointer = ::operator new(size_t(m_Size));
 }
 
 void RefMemoryManager::Pool::Release()
 {
-    BOOST_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
     ::operator delete(m_Pointer);
     m_Pointer = nullptr;
 }
index 84a74ed..7d86b11 100644 (file)
@@ -44,8 +44,8 @@ RefTensorHandle::~RefTensorHandle()
 
 void RefTensorHandle::Manage()
 {
-    BOOST_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
-    BOOST_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+    ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
+    ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
 
     m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
 }
@@ -84,7 +84,7 @@ void* RefTensorHandle::GetPointer() const
     }
     else
     {
-        BOOST_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
+        ARMNN_ASSERT_MSG(m_Pool, "RefTensorHandle::GetPointer called on unmanaged, unallocated tensor handle");
         return m_MemoryManager->GetPointer(m_Pool);
     }
 }
@@ -92,14 +92,14 @@ void* RefTensorHandle::GetPointer() const
 void RefTensorHandle::CopyOutTo(void* dest) const
 {
     const void *src = GetPointer();
-    BOOST_ASSERT(src);
+    ARMNN_ASSERT(src);
     memcpy(dest, src, m_TensorInfo.GetNumBytes());
 }
 
 void RefTensorHandle::CopyInFrom(const void* src)
 {
     void *dest = GetPointer();
-    BOOST_ASSERT(dest);
+    ARMNN_ASSERT(dest);
     memcpy(dest, src, m_TensorInfo.GetNumBytes());
 }
 
index f43e8b6..be20644 100644 (file)
@@ -5,14 +5,13 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 
 #include <ResolveType.hpp>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
@@ -78,28 +77,28 @@ public:
 
     TypedIterator& operator++() override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -107,7 +106,7 @@ public:
     TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
     {
         IgnoreUnused(axisIndex);
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -504,7 +503,7 @@ public:
     // This should be called to set index for per-axis Encoder/Decoder
     PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
     {
-         BOOST_ASSERT(m_Iterator);
+         ARMNN_ASSERT(m_Iterator);
          m_Iterator = m_Start + index;
          m_AxisIndex = axisIndex;
          return *this;
@@ -519,7 +518,7 @@ public:
 
     PerAxisIterator& operator++() override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         ++m_Iterator;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -527,7 +526,7 @@ public:
 
     PerAxisIterator& operator+=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator += increment;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -535,7 +534,7 @@ public:
 
     PerAxisIterator& operator-=(const unsigned int decrement) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator -= decrement;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -543,7 +542,7 @@ public:
 
     PerAxisIterator& operator[](const unsigned int index) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
index 7efdb9b..bf7de1b 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <armnn/Types.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnnUtils;
 
@@ -42,11 +42,11 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
 {
     TensorShape inputShape = inputTensorInfo.GetShape();
 
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
 
     TensorShape outputShape = outputTensorInfo.GetShape();
 
-    BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+    ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
 
     const unsigned int inputBatchSize = inputShape[0];
     const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
@@ -55,12 +55,12 @@ void BatchToSpaceNd(const DataLayoutIndexed& dataLayout,
     const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
     const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
 
-    BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+    ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
 
     const unsigned int blockShapeHeight = blockShape[0];
     const unsigned int blockShapeWidth = blockShape[1];
 
-    BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+    ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
 
     const unsigned int cropsTop = cropsData[0].first;
     const unsigned int cropsLeft = cropsData[1].first;
index bb55424..a85e34e 100644 (file)
@@ -38,7 +38,7 @@ void Concatenate(const ConcatQueueDescriptor &data)
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
-            BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+            ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
index 0c13e3b..9d2f410 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "ConvImpl.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <cmath>
 #include <limits>
@@ -15,7 +15,7 @@ namespace armnn
 
 QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
 {
-    BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+    ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
     if (multiplier == 0.0f)
     {
         m_Multiplier = 0;
@@ -26,14 +26,14 @@ QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multi
         const double q = std::frexp(multiplier, &m_RightShift);
         m_RightShift = -m_RightShift;
         int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
-        BOOST_ASSERT(qFixed <= (1ll << 31));
+        ARMNN_ASSERT(qFixed <= (1ll << 31));
         if (qFixed == (1ll << 31))
         {
             qFixed /= 2;
             --m_RightShift;
         }
-        BOOST_ASSERT(m_RightShift >= 0);
-        BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+        ARMNN_ASSERT(m_RightShift >= 0);
+        ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
         m_Multiplier = static_cast<int32_t>(qFixed);
     }
 }
@@ -61,7 +61,7 @@ int32_t QuantizedMultiplierSmallerThanOne::SaturatingRoundingDoublingHighMul(int
 
 int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
 {
-    BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+    ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
     int32_t mask = (1 << exponent) - 1;
     int32_t remainder = x & mask;
     int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
index 562fd3e..f5aa8f3 100644 (file)
@@ -15,7 +15,6 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <cmath>
index 3434ccb..deb3b1f 100644 (file)
@@ -10,7 +10,7 @@
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -142,7 +142,7 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
             break;
         }
     }
index 91ca160..f5e9ec5 100644 (file)
@@ -8,7 +8,7 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnnUtils;
 
@@ -22,7 +22,7 @@ void DepthToSpace(const TensorInfo& inputInfo,
                   unsigned int dataTypeSize)
 {
     const unsigned int blockSize = descriptor.m_BlockSize;
-    BOOST_ASSERT(blockSize != 0u);
+    ARMNN_ASSERT(blockSize != 0u);
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int batches = inputShape[0];
index 63c0405..fdc8e30 100644 (file)
@@ -16,7 +16,7 @@ void Dequantize(Decoder<float>& inputDecoder,
                 const TensorInfo& outputInfo)
 {
     IgnoreUnused(outputInfo);
-    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
         // inputDecoder.Get() dequantizes the data element from whatever
index 57cf01e..61a504e 100644 (file)
@@ -5,8 +5,8 @@
 
 #include "DetectionPostProcess.hpp"
 
+#include <armnn/utility/Assert.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <algorithm>
@@ -213,8 +213,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
         // xmax
         boxCorners[indexW] = xCentre + halfW;
 
-        BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
-        BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+        ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+        ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
     }
 
     unsigned int numClassesWithBg = desc.m_NumClasses + 1;
index e93987d..c0524a7 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -89,7 +89,7 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void*
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+            ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
             break;
         }
     }
@@ -107,7 +107,7 @@ inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void*
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+            ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
             break;
         }
     }
index 02d9b06..5a87520 100644 (file)
@@ -7,8 +7,6 @@
 
 #include "RefWorkloadUtils.hpp"
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
index 4cf3a14..c23edcd 100644 (file)
@@ -36,7 +36,7 @@ void Gather(const TensorInfo& paramsInfo,
     {
         unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
 
-        BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+        ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
 
         unsigned int startOffset = indx * paramsProduct;
         unsigned int endOffset = startOffset + paramsProduct;
@@ -51,7 +51,7 @@ void Gather(const TensorInfo& paramsInfo,
         }
     }
 
-    BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+    ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
 }
 
 } //namespace armnn
index 103d62a..1998f50 100644 (file)
@@ -6,11 +6,11 @@
 #include "LogSoftmax.hpp"
 
 #include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <cmath>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace
@@ -35,7 +35,7 @@ void LogSoftmax(Decoder<float>& input,
     const unsigned int numDimensions = inputInfo.GetNumDimensions();
 
     bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
-    BOOST_ASSERT_MSG(axisIsValid,
+    ARMNN_ASSERT_MSG(axisIsValid,
         "Axis index is not in range [-numDimensions, numDimensions).");
     IgnoreUnused(axisIsValid);
 
index f2c0a4f..72080ef 100644 (file)
@@ -128,7 +128,7 @@ void Mean(const armnn::TensorInfo& inputInfo,
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
         unsigned int current = inputDims[resolvedAxis[idx]];
-        BOOST_ASSERT(boost::numeric_cast<float>(current) <
+        ARMNN_ASSERT(boost::numeric_cast<float>(current) <
               (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
         numElementsInAxis *= current;
     }
index 3506198..d3e65e6 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <armnn/Types.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <cstring>
 
@@ -24,10 +24,10 @@ void RefConstantWorkload::PostAllocationConfigure()
 {
     const ConstantQueueDescriptor& data = this->m_Data;
 
-    BOOST_ASSERT(data.m_LayerOutput != nullptr);
+    ARMNN_ASSERT(data.m_LayerOutput != nullptr);
 
     const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
-    BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+    ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
 
     memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
         outputInfo.GetNumBytes());
index ac82db9..f8c3548 100644 (file)
@@ -32,7 +32,7 @@ RefFullyConnectedWorkload::RefFullyConnectedWorkload(
 void RefFullyConnectedWorkload::PostAllocationConfigure()
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
     m_InputShape = inputInfo.GetShape();
     m_InputDecoder = MakeDecoder<float>(inputInfo);
 
index a987e79..a2ace13 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <Profiling.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -27,8 +27,8 @@ void RefLogSoftmaxWorkload::Execute() const
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
 
-    BOOST_ASSERT(decoder != nullptr);
-    BOOST_ASSERT(encoder != nullptr);
+    ARMNN_ASSERT(decoder != nullptr);
+    ARMNN_ASSERT(encoder != nullptr);
 
     LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
 }
index be36f40..fc85950 100644 (file)
@@ -26,7 +26,7 @@ void RefStackWorkload::Execute() const
     if (!m_Data.m_Parameters.m_Axis)
     {
         float* output = GetOutputTensorData<float>(0, m_Data);
-        BOOST_ASSERT(output != nullptr);
+        ARMNN_ASSERT(output != nullptr);
 
         unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
         unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
index bfd3c28..e994a09 100644 (file)
@@ -27,7 +27,7 @@ void RefStridedSliceWorkload::Execute() const
     DataType inputDataType  = inputInfo.GetDataType();
     DataType outputDataType = outputInfo.GetDataType();
 
-    BOOST_ASSERT(inputDataType == outputDataType);
+    ARMNN_ASSERT(inputDataType == outputDataType);
     IgnoreUnused(outputDataType);
 
     StridedSlice(inputInfo,
index 0223cdc..e972524 100644 (file)
@@ -5,9 +5,9 @@
 
 #include "Slice.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -22,11 +22,11 @@ void Slice(const TensorInfo& inputInfo,
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int numDims    = inputShape.GetNumDimensions();
 
-    BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
-    BOOST_ASSERT(descriptor.m_Size.size()  == numDims);
+    ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+    ARMNN_ASSERT(descriptor.m_Size.size()  == numDims);
 
     constexpr unsigned int maxNumDims = 4;
-    BOOST_ASSERT(numDims <= maxNumDims);
+    ARMNN_ASSERT(numDims <= maxNumDims);
 
     std::vector<unsigned int> paddedInput(4);
     std::vector<unsigned int> paddedBegin(4);
@@ -65,10 +65,10 @@ void Slice(const TensorInfo& inputInfo,
     unsigned int size2  = paddedSize[2];
     unsigned int size3  = paddedSize[3];
 
-    BOOST_ASSERT(begin0 + size0 <= dim0);
-    BOOST_ASSERT(begin1 + size1 <= dim1);
-    BOOST_ASSERT(begin2 + size2 <= dim2);
-    BOOST_ASSERT(begin3 + size3 <= dim3);
+    ARMNN_ASSERT(begin0 + size0 <= dim0);
+    ARMNN_ASSERT(begin1 + size1 <= dim1);
+    ARMNN_ASSERT(begin2 + size2 <= dim2);
+    ARMNN_ASSERT(begin3 + size3 <= dim3);
 
     const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
     unsigned char* output      = reinterpret_cast<unsigned char*>(outputData);
index 5036389..32eca84 100644 (file)
@@ -16,9 +16,9 @@ namespace armnn
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
 void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
 {
-    BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+    ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
                      "Required axis index greater than number of dimensions.");
-    BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+    ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
                      "Required axis index lower than negative of the number of dimensions");
 
     unsigned int uAxis = axis < 0  ?
index 3bddfb0..09edc5e 100644 (file)
@@ -6,8 +6,7 @@
 #include "RefWorkloadUtils.hpp"
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include "Splitter.hpp"
 
 #include <cmath>
@@ -47,7 +46,7 @@ void Split(const SplitterQueueDescriptor& data)
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
-            BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
index 271c6fd..26309b0 100644 (file)
@@ -8,7 +8,7 @@
 #include "RefWorkloadUtils.hpp"
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -38,7 +38,7 @@ void Splitter(const SplitterQueueDescriptor& data)
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
-            BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
@@ -67,10 +67,10 @@ void Splitter(const SplitterQueueDescriptor& data)
 
                 //We are within the view, to copy input data to the output corresponding to this view.
                 DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
-                BOOST_ASSERT(outputData);
+                ARMNN_ASSERT(outputData);
 
                 const DataType* inputData = GetInputTensorData<DataType>(0, data);
-                BOOST_ASSERT(inputData);
+                ARMNN_ASSERT(inputData);
 
                 outputData[outIndex] = inputData[index];
             }
index 62f06dc..b00b049 100644 (file)
@@ -7,7 +7,8 @@
 
 #include <ResolveType.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <cstring>
@@ -20,12 +21,12 @@ namespace
 
 void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
 {
-    BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+    ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
 
     const unsigned int beginIndicesCount =
         boost::numeric_cast<unsigned int>(p.m_Begin.size());
 
-    BOOST_ASSERT(dimCount >= beginIndicesCount);
+    ARMNN_ASSERT(dimCount >= beginIndicesCount);
     const unsigned int padCount = dimCount - beginIndicesCount;
 
     p.m_Begin.resize(dimCount);
index e03c42f..5d66fd5 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -25,7 +25,7 @@ public:
         , m_Data(data)
         , m_DataLayout(dataLayout)
     {
-        BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+        ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
     }
 
     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
index bb60ac1..cae7037 100644 (file)
@@ -62,7 +62,7 @@ void CommandHandler::HandleCommands(IProfilingConnection& profilingConnection)
                 m_CommandHandlerRegistry.GetFunctor(packet.GetPacketFamily(), 
                                                     packet.GetPacketId(), 
                                                     version.GetEncodedValue());
-            BOOST_ASSERT(commandHandlerFunctor);
+            ARMNN_ASSERT(commandHandlerFunctor);
             commandHandlerFunctor->operator()(packet);
         }
         catch (const armnn::TimeoutException&)
index 8070afe..c2fef7a 100644 (file)
@@ -5,7 +5,8 @@
 
 #include "CommandHandlerRegistry.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
 #include <boost/format.hpp>
 
 namespace armnn
@@ -19,7 +20,7 @@ void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor,
                                              uint32_t packetId,
                                              uint32_t version)
 {
-    BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+    ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
 
     CommandHandlerKey key(familyId, packetId, version);
     registry[key] = functor;
@@ -27,7 +28,7 @@ void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor,
 
 void CommandHandlerRegistry::RegisterFunctor(CommandHandlerFunctor* functor)
 {
-    BOOST_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
+    ARMNN_ASSERT_MSG(functor, "Provided functor should not be a nullptr");
 
     RegisterFunctor(functor, functor->GetFamilyId(), functor->GetPacketId(), functor->GetVersion());
 }
index c84da10..415a660 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <armnn/Exceptions.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/format.hpp>
@@ -37,11 +38,11 @@ const Category* CounterDirectory::RegisterCategory(const std::string& categoryNa
 
     // Create the category
     CategoryPtr category = std::make_unique<Category>(categoryName);
-    BOOST_ASSERT(category);
+    ARMNN_ASSERT(category);
 
     // Get the raw category pointer
     const Category* categoryPtr = category.get();
-    BOOST_ASSERT(categoryPtr);
+    ARMNN_ASSERT(categoryPtr);
 
     // Register the category
     m_Categories.insert(std::move(category));
@@ -99,11 +100,11 @@ const Device* CounterDirectory::RegisterDevice(const std::string& deviceName,
 
     // Create the device
     DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
-    BOOST_ASSERT(device);
+    ARMNN_ASSERT(device);
 
     // Get the raw device pointer
     const Device* devicePtr = device.get();
-    BOOST_ASSERT(devicePtr);
+    ARMNN_ASSERT(devicePtr);
 
     // Register the device
     m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
@@ -162,15 +163,15 @@ const CounterSet* CounterDirectory::RegisterCounterSet(const std::string& counte
 
     // Get the counter set UID
     uint16_t counterSetUid = GetNextUid();
-    BOOST_ASSERT(counterSetUid == counterSetUidPeek);
+    ARMNN_ASSERT(counterSetUid == counterSetUidPeek);
 
     // Create the counter set
     CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
-    BOOST_ASSERT(counterSet);
+    ARMNN_ASSERT(counterSet);
 
     // Get the raw counter set pointer
     const CounterSet* counterSetPtr = counterSet.get();
-    BOOST_ASSERT(counterSetPtr);
+    ARMNN_ASSERT(counterSetPtr);
 
     // Register the counter set
     m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
@@ -251,14 +252,14 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
 
     // Get the parent category
     const CategoryPtr& parentCategory = *categoryIt;
-    BOOST_ASSERT(parentCategory);
+    ARMNN_ASSERT(parentCategory);
 
     // Check that a counter with the given name is not already registered within the parent category
     const std::vector<uint16_t>& parentCategoryCounters = parentCategory->m_Counters;
     for (uint16_t parentCategoryCounterUid : parentCategoryCounters)
     {
         const Counter* parentCategoryCounter = GetCounter(parentCategoryCounterUid);
-        BOOST_ASSERT(parentCategoryCounter);
+        ARMNN_ASSERT(parentCategoryCounter);
 
         if (parentCategoryCounter->m_Name == name)
         {
@@ -290,7 +291,7 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
 
     // Get the counter UIDs and calculate the max counter UID
     std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
-    BOOST_ASSERT(!counterUids.empty());
+    ARMNN_ASSERT(!counterUids.empty());
     uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
 
     // Get the counter units
@@ -308,11 +309,11 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
                                                    unitsValue,
                                                    deviceUidValue,
                                                    counterSetUidValue);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Get the raw counter pointer
     const Counter* counterPtr = counter.get();
-    BOOST_ASSERT(counterPtr);
+    ARMNN_ASSERT(counterPtr);
 
     // Process multiple counters if necessary
     for (uint16_t counterUid : counterUids)
@@ -336,7 +337,7 @@ const Category* CounterDirectory::GetCategory(const std::string& categoryName) c
     }
 
     const Category* category = it->get();
-    BOOST_ASSERT(category);
+    ARMNN_ASSERT(category);
 
     return category;
 }
@@ -350,8 +351,8 @@ const Device* CounterDirectory::GetDevice(uint16_t deviceUid) const
     }
 
     const Device* device = it->second.get();
-    BOOST_ASSERT(device);
-    BOOST_ASSERT(device->m_Uid == deviceUid);
+    ARMNN_ASSERT(device);
+    ARMNN_ASSERT(device->m_Uid == deviceUid);
 
     return device;
 }
@@ -365,8 +366,8 @@ const CounterSet* CounterDirectory::GetCounterSet(uint16_t counterSetUid) const
     }
 
     const CounterSet* counterSet = it->second.get();
-    BOOST_ASSERT(counterSet);
-    BOOST_ASSERT(counterSet->m_Uid == counterSetUid);
+    ARMNN_ASSERT(counterSet);
+    ARMNN_ASSERT(counterSet->m_Uid == counterSetUid);
 
     return counterSet;
 }
@@ -380,9 +381,9 @@ const Counter* CounterDirectory::GetCounter(uint16_t counterUid) const
     }
 
     const Counter* counter = it->second.get();
-    BOOST_ASSERT(counter);
-    BOOST_ASSERT(counter->m_Uid <= counterUid);
-    BOOST_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
+    ARMNN_ASSERT(counter);
+    ARMNN_ASSERT(counter->m_Uid <= counterUid);
+    ARMNN_ASSERT(counter->m_Uid <= counter->m_MaxCounterUid);
 
     return counter;
 }
@@ -449,7 +450,7 @@ CategoriesIt CounterDirectory::FindCategory(const std::string& categoryName) con
 {
     return std::find_if(m_Categories.begin(), m_Categories.end(), [&categoryName](const CategoryPtr& category)
     {
-        BOOST_ASSERT(category);
+        ARMNN_ASSERT(category);
 
         return category->m_Name == categoryName;
     });
@@ -464,8 +465,8 @@ DevicesIt CounterDirectory::FindDevice(const std::string& deviceName) const
 {
     return std::find_if(m_Devices.begin(), m_Devices.end(), [&deviceName](const auto& pair)
     {
-        BOOST_ASSERT(pair.second);
-        BOOST_ASSERT(pair.second->m_Uid == pair.first);
+        ARMNN_ASSERT(pair.second);
+        ARMNN_ASSERT(pair.second->m_Uid == pair.first);
 
         return pair.second->m_Name == deviceName;
     });
@@ -480,8 +481,8 @@ CounterSetsIt CounterDirectory::FindCounterSet(const std::string& counterSetName
 {
     return std::find_if(m_CounterSets.begin(), m_CounterSets.end(), [&counterSetName](const auto& pair)
     {
-        BOOST_ASSERT(pair.second);
-        BOOST_ASSERT(pair.second->m_Uid == pair.first);
+        ARMNN_ASSERT(pair.second);
+        ARMNN_ASSERT(pair.second->m_Uid == pair.first);
 
         return pair.second->m_Name == counterSetName;
     });
@@ -496,8 +497,8 @@ CountersIt CounterDirectory::FindCounter(const std::string& counterName) const
 {
     return std::find_if(m_Counters.begin(), m_Counters.end(), [&counterName](const auto& pair)
     {
-        BOOST_ASSERT(pair.second);
-        BOOST_ASSERT(pair.second->m_Uid == pair.first);
+        ARMNN_ASSERT(pair.second);
+        ARMNN_ASSERT(pair.second->m_Uid == pair.first);
 
         return pair.second->m_Name == counterName;
     });
@@ -536,7 +537,7 @@ uint16_t CounterDirectory::GetNumberOfCores(const Optional<uint16_t>& numberOfCo
 
         // Get the associated device
         const DevicePtr& device = deviceIt->second;
-        BOOST_ASSERT(device);
+        ARMNN_ASSERT(device);
 
         // Get the number of cores of the associated device
         return device->m_Cores;
index 83229ca..f9bdde9 100644 (file)
@@ -111,7 +111,7 @@ bool FileOnlyProfilingConnection::SendCounterSelectionPacket()
 
 bool FileOnlyProfilingConnection::WritePacket(const unsigned char* buffer, uint32_t length)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     // Read Header and determine case
     uint32_t outgoingHeaderAsWords[2];
index 3a8f3f8..4d7241e 100644 (file)
@@ -134,7 +134,7 @@ void ProfilingService::Update()
         try
         {
             // Setup the profiling connection
-            BOOST_ASSERT(m_ProfilingConnectionFactory);
+            ARMNN_ASSERT(m_ProfilingConnectionFactory);
             m_ProfilingConnection = m_ProfilingConnectionFactory->GetProfilingConnection(m_Options);
         }
         catch (const Exception& e)
@@ -155,7 +155,7 @@ void ProfilingService::Update()
                                                                           // "NotConnected" state
         break;
     case ProfilingState::WaitingForAck:
-        BOOST_ASSERT(m_ProfilingConnection);
+        ARMNN_ASSERT(m_ProfilingConnection);
 
         // Start the command thread
         m_CommandHandler.Start(*m_ProfilingConnection);
@@ -204,7 +204,7 @@ void ProfilingService::Disconnect()
 void ProfilingService::AddBackendProfilingContext(const BackendId backendId,
     std::shared_ptr<armnn::profiling::IBackendProfilingContext> profilingContext)
 {
-    BOOST_ASSERT(profilingContext != nullptr);
+    ARMNN_ASSERT(profilingContext != nullptr);
     // Register the backend counters
     m_MaxGlobalCounterId = profilingContext->RegisterCounters(m_MaxGlobalCounterId);
     m_BackendProfilingContexts.emplace(backendId, std::move(profilingContext));
@@ -238,7 +238,7 @@ uint32_t ProfilingService::GetCounterValue(uint16_t counterUid) const
 {
     CheckCounterUid(counterUid);
     std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
-    BOOST_ASSERT(counterValuePtr);
+    ARMNN_ASSERT(counterValuePtr);
     return counterValuePtr->load(std::memory_order::memory_order_relaxed);
 }
 
@@ -268,7 +268,7 @@ void ProfilingService::SetCounterValue(uint16_t counterUid, uint32_t value)
 {
     CheckCounterUid(counterUid);
     std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
-    BOOST_ASSERT(counterValuePtr);
+    ARMNN_ASSERT(counterValuePtr);
     counterValuePtr->store(value, std::memory_order::memory_order_relaxed);
 }
 
@@ -276,7 +276,7 @@ uint32_t ProfilingService::AddCounterValue(uint16_t counterUid, uint32_t value)
 {
     CheckCounterUid(counterUid);
     std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
-    BOOST_ASSERT(counterValuePtr);
+    ARMNN_ASSERT(counterValuePtr);
     return counterValuePtr->fetch_add(value, std::memory_order::memory_order_relaxed);
 }
 
@@ -284,7 +284,7 @@ uint32_t ProfilingService::SubtractCounterValue(uint16_t counterUid, uint32_t va
 {
     CheckCounterUid(counterUid);
     std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
-    BOOST_ASSERT(counterValuePtr);
+    ARMNN_ASSERT(counterValuePtr);
     return counterValuePtr->fetch_sub(value, std::memory_order::memory_order_relaxed);
 }
 
@@ -292,7 +292,7 @@ uint32_t ProfilingService::IncrementCounterValue(uint16_t counterUid)
 {
     CheckCounterUid(counterUid);
     std::atomic<uint32_t>* counterValuePtr = m_CounterIndex.at(counterUid);
-    BOOST_ASSERT(counterValuePtr);
+    ARMNN_ASSERT(counterValuePtr);
     return counterValuePtr->operator++(std::memory_order::memory_order_relaxed);
 }
 
@@ -332,7 +332,7 @@ void ProfilingService::Initialize()
                                                    "Network loads",
                                                    "The number of networks loaded at runtime",
                                                    std::string("networks"));
-        BOOST_ASSERT(loadedNetworksCounter);
+        ARMNN_ASSERT(loadedNetworksCounter);
         InitializeCounterValue(loadedNetworksCounter->m_Uid);
     }
     // Register a counter for the number of unloaded networks
@@ -348,7 +348,7 @@ void ProfilingService::Initialize()
                                                    "Network unloads",
                                                    "The number of networks unloaded at runtime",
                                                    std::string("networks"));
-        BOOST_ASSERT(unloadedNetworksCounter);
+        ARMNN_ASSERT(unloadedNetworksCounter);
         InitializeCounterValue(unloadedNetworksCounter->m_Uid);
     }
     // Register a counter for the number of registered backends
@@ -364,7 +364,7 @@ void ProfilingService::Initialize()
                                                    "Backends registered",
                                                    "The number of registered backends",
                                                    std::string("backends"));
-        BOOST_ASSERT(registeredBackendsCounter);
+        ARMNN_ASSERT(registeredBackendsCounter);
         InitializeCounterValue(registeredBackendsCounter->m_Uid);
     }
     // Register a counter for the number of registered backends
@@ -380,7 +380,7 @@ void ProfilingService::Initialize()
                                                    "Backends unregistered",
                                                    "The number of unregistered backends",
                                                    std::string("backends"));
-        BOOST_ASSERT(unregisteredBackendsCounter);
+        ARMNN_ASSERT(unregisteredBackendsCounter);
         InitializeCounterValue(unregisteredBackendsCounter->m_Uid);
     }
     // Register a counter for the number of inferences run
@@ -396,7 +396,7 @@ void ProfilingService::Initialize()
                                                    "Inferences run",
                                                    "The number of inferences run",
                                                    std::string("inferences"));
-        BOOST_ASSERT(inferencesRunCounter);
+        ARMNN_ASSERT(inferencesRunCounter);
         InitializeCounterValue(inferencesRunCounter->m_Uid);
     }
 }
index df7bd8f..a6c5e29 100644 (file)
@@ -264,8 +264,8 @@ protected:
                                         IProfilingConnectionFactory* other,
                                         IProfilingConnectionFactory*& backup)
     {
-        BOOST_ASSERT(instance.m_ProfilingConnectionFactory);
-        BOOST_ASSERT(other);
+        ARMNN_ASSERT(instance.m_ProfilingConnectionFactory);
+        ARMNN_ASSERT(other);
 
         backup = instance.m_ProfilingConnectionFactory.release();
         instance.m_ProfilingConnectionFactory.reset(other);
index e419769..e542b69 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <WallClockTimer.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <fstream>
 #include <iostream>
@@ -88,7 +88,7 @@ std::vector<uint16_t> GetNextCounterUids(uint16_t firstUid, uint16_t cores)
 
 void WriteBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset,  const void* value, unsigned int valueSize)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     WriteBytes(packetBuffer->GetWritableData(), offset, value, valueSize);
 }
@@ -102,36 +102,36 @@ uint32_t ConstructHeader(uint32_t packetFamily,
 
 void WriteUint64(const std::unique_ptr<IPacketBuffer>& packetBuffer, unsigned int offset, uint64_t value)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     WriteUint64(packetBuffer->GetWritableData(), offset, value);
 }
 
 void WriteUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint32_t value)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     WriteUint32(packetBuffer->GetWritableData(), offset, value);
 }
 
 void WriteUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint16_t value)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     WriteUint16(packetBuffer->GetWritableData(), offset, value);
 }
 
 void WriteUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset, uint8_t value)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     WriteUint8(packetBuffer->GetWritableData(), offset, value);
 }
 
 void WriteBytes(unsigned char* buffer, unsigned int offset, const void* value, unsigned int valueSize)
 {
-    BOOST_ASSERT(buffer);
-    BOOST_ASSERT(value);
+    ARMNN_ASSERT(buffer);
+    ARMNN_ASSERT(value);
 
     for (unsigned int i = 0; i < valueSize; i++, offset++)
     {
@@ -141,7 +141,7 @@ void WriteBytes(unsigned char* buffer, unsigned int offset, const void* value, u
 
 void WriteUint64(unsigned char* buffer, unsigned int offset, uint64_t value)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     buffer[offset]     = static_cast<unsigned char>(value & 0xFF);
     buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -155,7 +155,7 @@ void WriteUint64(unsigned char* buffer, unsigned int offset, uint64_t value)
 
 void WriteUint32(unsigned char* buffer, unsigned int offset, uint32_t value)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     buffer[offset]     = static_cast<unsigned char>(value & 0xFF);
     buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -165,7 +165,7 @@ void WriteUint32(unsigned char* buffer, unsigned int offset, uint32_t value)
 
 void WriteUint16(unsigned char* buffer, unsigned int offset, uint16_t value)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     buffer[offset]     = static_cast<unsigned char>(value & 0xFF);
     buffer[offset + 1] = static_cast<unsigned char>((value >> 8) & 0xFF);
@@ -173,50 +173,50 @@ void WriteUint16(unsigned char* buffer, unsigned int offset, uint16_t value)
 
 void WriteUint8(unsigned char* buffer, unsigned int offset, uint8_t value)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     buffer[offset] = static_cast<unsigned char>(value);
 }
 
 void ReadBytes(const IPacketBufferPtr& packetBuffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     ReadBytes(packetBuffer->GetReadableData(), offset, valueSize, outValue);
 }
 
 uint64_t ReadUint64(const IPacketBufferPtr& packetBuffer, unsigned int offset)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     return ReadUint64(packetBuffer->GetReadableData(), offset);
 }
 
 uint32_t ReadUint32(const IPacketBufferPtr& packetBuffer, unsigned int offset)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     return ReadUint32(packetBuffer->GetReadableData(), offset);
 }
 
 uint16_t ReadUint16(const IPacketBufferPtr& packetBuffer, unsigned int offset)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     return ReadUint16(packetBuffer->GetReadableData(), offset);
 }
 
 uint8_t ReadUint8(const IPacketBufferPtr& packetBuffer, unsigned int offset)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     return ReadUint8(packetBuffer->GetReadableData(), offset);
 }
 
 void ReadBytes(const unsigned char* buffer, unsigned int offset, unsigned int valueSize, uint8_t outValue[])
 {
-    BOOST_ASSERT(buffer);
-    BOOST_ASSERT(outValue);
+    ARMNN_ASSERT(buffer);
+    ARMNN_ASSERT(outValue);
 
     for (unsigned int i = 0; i < valueSize; i++, offset++)
     {
@@ -226,7 +226,7 @@ void ReadBytes(const unsigned char* buffer, unsigned int offset, unsigned int va
 
 uint64_t ReadUint64(const unsigned char* buffer, unsigned int offset)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     uint64_t value = 0;
     value  = static_cast<uint64_t>(buffer[offset]);
@@ -243,7 +243,7 @@ uint64_t ReadUint64(const unsigned char* buffer, unsigned int offset)
 
 uint32_t ReadUint32(const unsigned char* buffer, unsigned int offset)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     uint32_t value = 0;
     value  = static_cast<uint32_t>(buffer[offset]);
@@ -255,7 +255,7 @@ uint32_t ReadUint32(const unsigned char* buffer, unsigned int offset)
 
 uint16_t ReadUint16(const unsigned char* buffer, unsigned int offset)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     uint32_t value = 0;
     value  = static_cast<uint32_t>(buffer[offset]);
@@ -265,7 +265,7 @@ uint16_t ReadUint16(const unsigned char* buffer, unsigned int offset)
 
 uint8_t ReadUint8(const unsigned char* buffer, unsigned int offset)
 {
-    BOOST_ASSERT(buffer);
+    ARMNN_ASSERT(buffer);
 
     return buffer[offset];
 }
@@ -310,7 +310,7 @@ uint32_t CalculateSizeOfPaddedSwString(const std::string& str)
 // Read TimelineMessageDirectoryPacket from given IPacketBuffer and offset
 SwTraceMessage ReadSwTraceMessage(const unsigned char* packetBuffer, unsigned int& offset)
 {
-    BOOST_ASSERT(packetBuffer);
+    ARMNN_ASSERT(packetBuffer);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
 
index ae4bab9..24b86d4 100644 (file)
@@ -9,6 +9,7 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Conversion.hpp>
 #include <Processes.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/format.hpp>
@@ -178,10 +179,10 @@ bool SendCounterPacket::CreateCategoryRecord(const CategoryPtr& category,
 {
     using namespace boost::numeric;
 
-    BOOST_ASSERT(category);
+    ARMNN_ASSERT(category);
 
     const std::string& categoryName = category->m_Name;
-    BOOST_ASSERT(!categoryName.empty());
+    ARMNN_ASSERT(!categoryName.empty());
 
     // Remove any duplicate counters
     std::vector<uint16_t> categoryCounters;
@@ -299,13 +300,13 @@ bool SendCounterPacket::CreateDeviceRecord(const DevicePtr& device,
                                            DeviceRecord& deviceRecord,
                                            std::string& errorMessage)
 {
-    BOOST_ASSERT(device);
+    ARMNN_ASSERT(device);
 
     uint16_t deviceUid = device->m_Uid;
     const std::string& deviceName = device->m_Name;
     uint16_t deviceCores = device->m_Cores;
 
-    BOOST_ASSERT(!deviceName.empty());
+    ARMNN_ASSERT(!deviceName.empty());
 
     // Device record word 0:
     // 16:31 [16] uid: the unique identifier for the device
@@ -349,13 +350,13 @@ bool SendCounterPacket::CreateCounterSetRecord(const CounterSetPtr& counterSet,
                                                CounterSetRecord& counterSetRecord,
                                                std::string& errorMessage)
 {
-    BOOST_ASSERT(counterSet);
+    ARMNN_ASSERT(counterSet);
 
     uint16_t counterSetUid = counterSet->m_Uid;
     const std::string& counterSetName = counterSet->m_Name;
     uint16_t counterSetCount = counterSet->m_Count;
 
-    BOOST_ASSERT(!counterSetName.empty());
+    ARMNN_ASSERT(!counterSetName.empty());
 
     // Counter set record word 0:
     // 16:31 [16] uid: the unique identifier for the counter_set
@@ -402,7 +403,7 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
 {
     using namespace boost::numeric;
 
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     uint16_t           counterUid           = counter->m_Uid;
     uint16_t           maxCounterUid        = counter->m_MaxCounterUid;
@@ -415,9 +416,9 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
     const std::string& counterDescription   = counter->m_Description;
     const std::string& counterUnits         = counter->m_Units;
 
-    BOOST_ASSERT(counterClass == 0 || counterClass == 1);
-    BOOST_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
-    BOOST_ASSERT(counterMultiplier);
+    ARMNN_ASSERT(counterClass == 0 || counterClass == 1);
+    ARMNN_ASSERT(counterInterpolation == 0 || counterInterpolation == 1);
+    ARMNN_ASSERT(counterMultiplier);
 
     // Utils
     size_t uint32_t_size = sizeof(uint32_t);
@@ -450,7 +451,7 @@ bool SendCounterPacket::CreateEventRecord(const CounterPtr& counter,
     // 0:63 [64] multiplier: internal data stream is represented as integer values, this allows scaling of
     //                       those values as if they are fixed point numbers. Zero is not a valid value
     uint32_t multiplier[2] = { 0u, 0u };
-    BOOST_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
+    ARMNN_ASSERT(sizeof(counterMultiplier) == sizeof(multiplier));
     std::memcpy(multiplier, &counterMultiplier, sizeof(multiplier));
     uint32_t eventRecordWord3 = multiplier[0];
     uint32_t eventRecordWord4 = multiplier[1];
index 3e52c97..9954bd9 100644 (file)
@@ -9,7 +9,7 @@
 #include "armnn/profiling/ISendTimelinePacket.hpp"
 #include "ProfilingUtils.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <memory>
 
@@ -78,7 +78,7 @@ void SendTimelinePacket::ForwardWriteBinaryFunction(Func& func, Params&& ... par
     try
     {
         ReserveBuffer();
-        BOOST_ASSERT(m_WriteBuffer);
+        ARMNN_ASSERT(m_WriteBuffer);
         unsigned int numberOfBytesWritten = 0;
         // Header will be prepended to the buffer on Commit()
         while ( true )
index ada55d8..2cd44c4 100644 (file)
@@ -16,9 +16,9 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <atomic>
@@ -449,11 +449,11 @@ public:
     {
         // Create the category
         CategoryPtr category = std::make_unique<Category>(categoryName);
-        BOOST_ASSERT(category);
+        ARMNN_ASSERT(category);
 
         // Get the raw category pointer
         const Category* categoryPtr = category.get();
-        BOOST_ASSERT(categoryPtr);
+        ARMNN_ASSERT(categoryPtr);
 
         // Register the category
         m_Categories.insert(std::move(category));
@@ -469,11 +469,11 @@ public:
 
         // Create the device
         DevicePtr device = std::make_unique<Device>(deviceUid, deviceName, cores);
-        BOOST_ASSERT(device);
+        ARMNN_ASSERT(device);
 
         // Get the raw device pointer
         const Device* devicePtr = device.get();
-        BOOST_ASSERT(devicePtr);
+        ARMNN_ASSERT(devicePtr);
 
         // Register the device
         m_Devices.insert(std::make_pair(deviceUid, std::move(device)));
@@ -490,11 +490,11 @@ public:
 
         // Create the counter set
         CounterSetPtr counterSet = std::make_unique<CounterSet>(counterSetUid, counterSetName, count);
-        BOOST_ASSERT(counterSet);
+        ARMNN_ASSERT(counterSet);
 
         // Get the raw counter set pointer
         const CounterSet* counterSetPtr = counterSet.get();
-        BOOST_ASSERT(counterSetPtr);
+        ARMNN_ASSERT(counterSetPtr);
 
         // Register the counter set
         m_CounterSets.insert(std::make_pair(counterSetUid, std::move(counterSet)));
@@ -528,7 +528,7 @@ public:
 
         // Get the counter UIDs and calculate the max counter UID
         std::vector<uint16_t> counterUids = GetNextCounterUids(uid, deviceCores);
-        BOOST_ASSERT(!counterUids.empty());
+        ARMNN_ASSERT(!counterUids.empty());
         uint16_t maxCounterUid = deviceCores <= 1 ? counterUids.front() : counterUids.back();
 
         // Get the counter units
@@ -546,18 +546,18 @@ public:
                                                        unitsValue,
                                                        deviceUidValue,
                                                        counterSetUidValue);
-        BOOST_ASSERT(counter);
+        ARMNN_ASSERT(counter);
 
         // Get the raw counter pointer
         const Counter* counterPtr = counter.get();
-        BOOST_ASSERT(counterPtr);
+        ARMNN_ASSERT(counterPtr);
 
         // Process multiple counters if necessary
         for (uint16_t counterUid : counterUids)
         {
             // Connect the counter to the parent category
             Category* parentCategory = const_cast<Category*>(GetCategory(parentCategoryName));
-            BOOST_ASSERT(parentCategory);
+            ARMNN_ASSERT(parentCategory);
             parentCategory->m_Counters.push_back(counterUid);
 
             // Register the counter
@@ -584,7 +584,7 @@ public:
     {
         auto it = std::find_if(m_Categories.begin(), m_Categories.end(), [&name](const CategoryPtr& category)
         {
-            BOOST_ASSERT(category);
+            ARMNN_ASSERT(category);
 
             return category->m_Name == name;
         });
index 8de69f1..5c63b54 100644 (file)
@@ -31,7 +31,7 @@ void VerifyTimelineHeaderBinary(const unsigned char* readableData,
                                 unsigned int& offset,
                                 uint32_t packetDataLength)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     // Utils
     unsigned int uint32_t_size = sizeof(uint32_t);
@@ -60,7 +60,7 @@ void VerifyTimelineLabelBinaryPacketData(Optional<ProfilingGuid> guid,
                                          const unsigned char* readableData,
                                          unsigned int& offset)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     // Utils
     unsigned int uint32_t_size = sizeof(uint32_t);
@@ -101,7 +101,7 @@ void VerifyTimelineEventClassBinaryPacketData(ProfilingGuid guid,
                                               const unsigned char* readableData,
                                               unsigned int& offset)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     // Utils
     unsigned int uint32_t_size = sizeof(uint32_t);
@@ -127,7 +127,7 @@ void VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType relati
                                             const unsigned char* readableData,
                                             unsigned int& offset)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     uint32_t relationshipTypeUint = 0;
     switch (relationshipType)
@@ -205,7 +205,7 @@ void VerifyTimelineEntityBinaryPacketData(Optional<ProfilingGuid> guid,
                                           const unsigned char* readableData,
                                           unsigned int& offset)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     // Utils
     unsigned int uint32_t_size = sizeof(uint32_t);
@@ -238,7 +238,7 @@ void VerifyTimelineEventBinaryPacket(Optional<uint64_t> timestamp,
                                      const unsigned char* readableData,
                                      unsigned int& offset)
 {
-    BOOST_ASSERT(readableData);
+    ARMNN_ASSERT(readableData);
 
     // Utils
     unsigned int uint32_t_size = sizeof(uint32_t);
index 51f049d..a3c237f 100644 (file)
@@ -536,7 +536,7 @@ BOOST_AUTO_TEST_CASE(CreateEventRecordTest)
                                                          counterUnits,
                                                          deviceUid,
                                                          counterSetUid);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Create an event record
     SendCounterPacket::EventRecord eventRecord;
@@ -656,7 +656,7 @@ BOOST_AUTO_TEST_CASE(CreateEventRecordNoUnitsTest)
                                                          "",
                                                          deviceUid,
                                                          counterSetUid);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Create an event record
     SendCounterPacket::EventRecord eventRecord;
@@ -761,7 +761,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest1)
                                                          counterUnits,
                                                          deviceUid,
                                                          counterSetUid);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Create an event record
     SendCounterPacket::EventRecord eventRecord;
@@ -800,7 +800,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest2)
                                                          counterUnits,
                                                          deviceUid,
                                                          counterSetUid);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Create an event record
     SendCounterPacket::EventRecord eventRecord;
@@ -839,7 +839,7 @@ BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest3)
                                                          counterUnits,
                                                          deviceUid,
                                                          counterSetUid);
-    BOOST_ASSERT(counter);
+    ARMNN_ASSERT(counter);
 
     // Create an event record
     SendCounterPacket::EventRecord eventRecord;
@@ -859,7 +859,7 @@ BOOST_AUTO_TEST_CASE(CreateCategoryRecordTest)
     // Create a category for testing
     const std::string categoryName = "some_category";
     const CategoryPtr category = std::make_unique<Category>(categoryName);
-    BOOST_ASSERT(category);
+    ARMNN_ASSERT(category);
     category->m_Counters = { 11u, 23u, 5670u };
 
     // Create a collection of counters
@@ -903,9 +903,9 @@ BOOST_AUTO_TEST_CASE(CreateCategoryRecordTest)
     Counter* counter1 = counters.find(11)->second.get();
     Counter* counter2 = counters.find(23)->second.get();
     Counter* counter3 = counters.find(5670)->second.get();
-    BOOST_ASSERT(counter1);
-    BOOST_ASSERT(counter2);
-    BOOST_ASSERT(counter3);
+    ARMNN_ASSERT(counter1);
+    ARMNN_ASSERT(counter2);
+    ARMNN_ASSERT(counter3);
     uint16_t categoryEventCount = boost::numeric_cast<uint16_t>(counters.size());
 
     // Create a category record
index 7a5f796..84c88ad 100644 (file)
@@ -13,9 +13,9 @@
 #include <armnn/Exceptions.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/Conversion.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <atomic>
index 6adc75d..7e70289 100644 (file)
@@ -6,7 +6,6 @@
 #include "CaffePreprocessor.hpp"
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 #include <iostream>
index 07b55d2..7a33d34 100644 (file)
@@ -7,9 +7,9 @@
 #include "InferenceTest.hpp"
 #include "DeepSpeechV1Database.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/test/tools/floating_point_comparison.hpp>
 
@@ -40,13 +40,13 @@ public:
     {
         armnn::IgnoreUnused(options);
         const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
-        BOOST_ASSERT(output1.size() == k_OutputSize1);
+        ARMNN_ASSERT(output1.size() == k_OutputSize1);
 
         const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // new_state_c
-        BOOST_ASSERT(output2.size() == k_OutputSize2);
+        ARMNN_ASSERT(output2.size() == k_OutputSize2);
 
         const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // new_state_h
-        BOOST_ASSERT(output3.size() == k_OutputSize3);
+        ARMNN_ASSERT(output3.size() == k_OutputSize3);
 
         // Check each output to see whether it is the expected value
         for (unsigned int j = 0u; j < output1.size(); j++)
index a59f580..9252a46 100644 (file)
@@ -127,7 +127,7 @@ int main(int argc, const char* argv[])
         // Coverity points out that default_value(...) can throw a bad_lexical_cast,
         // and that desc.add_options() can throw boost::io::too_few_args.
         // They really won't in any of these cases.
-        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
         ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
         return EXIT_FAILURE;
     }
index f0184e4..5a42b8a 100644 (file)
@@ -11,7 +11,6 @@
 #include <armnnUtils/Permute.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 
 #include <iostream>
index 0529770..af931f9 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <armnn/ArmNN.hpp>
 #include <armnn/BackendRegistry.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #if defined(ARMNN_SERIALIZER)
 #include "armnnDeserializer/IDeserializer.hpp"
@@ -179,7 +180,7 @@ public:
                                      std::vector<armnn::BindingPointInfo>& outputBindings)
     {
         auto parser(IParser::Create());
-        BOOST_ASSERT(parser);
+        ARMNN_ASSERT(parser);
 
         armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};
 
index c6e5011..7e165b5 100644 (file)
@@ -4,11 +4,12 @@
 //
 #include "InferenceTest.hpp"
 
+#include <armnn/utility/Assert.hpp>
+
 #include "../src/armnn/Profiling.hpp"
 #include <boost/algorithm/string.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/program_options.hpp>
 #include <boost/filesystem/operations.hpp>
@@ -55,7 +56,7 @@ bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCas
         // Coverity points out that default_value(...) can throw a bad_lexical_cast,
         // and that desc.add_options() can throw boost::io::too_few_args.
         // They really won't in any of these cases.
-        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
         std::cerr << "Fatal internal error: " << e.what() << std::endl;
         return false;
     }
@@ -228,7 +229,7 @@ bool InferenceTest(const InferenceTestOptions& params,
             success = false;
             break;
         default:
-            BOOST_ASSERT_MSG(false, "Unexpected TestCaseResult");
+            ARMNN_ASSERT_MSG(false, "Unexpected TestCaseResult");
             return false;
         }
     }
index 5b9b45a..ed16464 100644 (file)
@@ -4,10 +4,10 @@
 //
 #include "InferenceTest.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <boost/algorithm/string.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/filesystem/path.hpp>
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/program_options.hpp>
 #include <boost/filesystem/operations.hpp>
@@ -80,7 +80,7 @@ struct ClassifierResultProcessor : public boost::static_visitor<>
     void operator()(const std::vector<int>& values)
     {
         IgnoreUnused(values);
-        BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
+        ARMNN_ASSERT_MSG(false, "Non-float predictions output not supported.");
     }
 
     ResultMap& GetResultMap() { return m_ResultMap; }
@@ -360,9 +360,9 @@ int ClassifierInferenceTestMain(int argc,
                                 const armnn::TensorShape* inputTensorShape)
 
 {
-    BOOST_ASSERT(modelFilename);
-    BOOST_ASSERT(inputBindingName);
-    BOOST_ASSERT(outputBindingName);
+    ARMNN_ASSERT(modelFilename);
+    ARMNN_ASSERT(inputBindingName);
+    ARMNN_ASSERT(outputBindingName);
 
     return InferenceTestMain(argc, argv, defaultTestCaseIds,
         [=]
index 83c5cce..1cf73ca 100644 (file)
@@ -4,6 +4,7 @@
 //
 #include "InferenceTestImage.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <boost/format.hpp>
@@ -165,7 +166,7 @@ std::tuple<uint8_t, uint8_t, uint8_t> InferenceTestImage::GetPixelAs3Channels(un
 
     const unsigned int pixelOffset = x * GetNumChannels() + y * GetWidth() * GetNumChannels();
     const uint8_t* const pixelData = m_Data.data() + pixelOffset;
-    BOOST_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
+    ARMNN_ASSERT(pixelData <= (m_Data.data() + GetSizeInBytes()));
 
     std::array<uint8_t, 3> outPixelData;
     outPixelData.fill(0);
index bd5029f..c1c5f63 100644 (file)
@@ -7,7 +7,7 @@
 #include <armnn/Logging.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/assert.hpp>
+
 #include <fstream>
 #include <vector>
 
index a26712c..e02a4ac 100644 (file)
@@ -7,9 +7,9 @@
 #include "InferenceTest.hpp"
 #include "MobileNetSsdDatabase.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/test/tools/floating_point_comparison.hpp>
 
@@ -38,16 +38,16 @@ public:
         armnn::IgnoreUnused(options);
 
         const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
-        BOOST_ASSERT(output1.size() == k_OutputSize1);
+        ARMNN_ASSERT(output1.size() == k_OutputSize1);
 
         const std::vector<float>& output2 = boost::get<std::vector<float>>(this->GetOutputs()[1]); // classes
-        BOOST_ASSERT(output2.size() == k_OutputSize2);
+        ARMNN_ASSERT(output2.size() == k_OutputSize2);
 
         const std::vector<float>& output3 = boost::get<std::vector<float>>(this->GetOutputs()[2]); // scores
-        BOOST_ASSERT(output3.size() == k_OutputSize3);
+        ARMNN_ASSERT(output3.size() == k_OutputSize3);
 
         const std::vector<float>& output4 = boost::get<std::vector<float>>(this->GetOutputs()[3]); // valid detections
-        BOOST_ASSERT(output4.size() == k_OutputSize4);
+        ARMNN_ASSERT(output4.size() == k_OutputSize4);
 
         const size_t numDetections = boost::numeric_cast<size_t>(output4[0]);
 
index ecfc212..dd1c295 100644 (file)
@@ -109,7 +109,7 @@ int main(int argc, char* argv[])
             // Coverity points out that default_value(...) can throw a bad_lexical_cast,
             // and that desc.add_options() can throw boost::io::too_few_args.
             // They really won't in any of these cases.
-            BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+            ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
             std::cerr << "Fatal internal error: " << e.what() << std::endl;
             return 1;
         }
index 5c969c6..0e72f7b 100644 (file)
@@ -59,7 +59,7 @@ int main(int argc, char* argv[])
             // Coverity points out that default_value(...) can throw a bad_lexical_cast,
             // and that desc.add_options() can throw boost::io::too_few_args.
             // They really won't in any of these cases.
-            BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+            ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
             std::cerr << "Fatal internal error: " << e.what() << std::endl;
             return 1;
         }
index a0aeb8b..278ba1b 100644 (file)
@@ -824,7 +824,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
         // Coverity points out that default_value(...) can throw a bad_lexical_cast,
         // and that desc.add_options() can throw boost::io::too_few_args.
         // They really won't in any of these cases.
-        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
+        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
         ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
         return EXIT_FAILURE;
     }
index 98db8d4..3b3e5a9 100644 (file)
@@ -12,7 +12,6 @@
 #include <tuple>
 #include <utility>
 
-#include <boost/assert.hpp>
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
index 4190e72..6c783d3 100644 (file)
@@ -7,13 +7,13 @@
 #include "InferenceTest.hpp"
 #include "YoloDatabase.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <algorithm>
 #include <array>
 #include <utility>
 
-#include <boost/assert.hpp>
 #include <boost/multi_array.hpp>
 #include <boost/test/tools/floating_point_comparison.hpp>
 
@@ -39,7 +39,7 @@ public:
         using Boost3dArray = boost::multi_array<float, 3>;
 
         const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);
-        BOOST_ASSERT(output.size() == YoloOutputSize);
+        ARMNN_ASSERT(output.size() == YoloOutputSize);
 
         constexpr Boost3dArray::index gridSize = 7;
         constexpr Boost3dArray::index numClasses = 20;
@@ -96,7 +96,7 @@ public:
                 }
             }
         }
-        BOOST_ASSERT(output.data() + YoloOutputSize == outputPtr);
+        ARMNN_ASSERT(output.data() + YoloOutputSize == outputPtr);
 
         std::vector<YoloDetectedObject> detectedObjects;
         detectedObjects.reserve(gridSize * gridSize * numScales * numClasses);
index 3e19c25..aad335d 100644 (file)
@@ -362,7 +362,7 @@ armnn::profiling::Packet GatordMockService::ReceivePacket()
 
     profiling::CommandHandlerFunctor* commandHandlerFunctor =
         m_HandlerRegistry.GetFunctor(packetRx.GetPacketFamily(), packetRx.GetPacketId(), version.GetEncodedValue());
-    BOOST_ASSERT(commandHandlerFunctor);
+    ARMNN_ASSERT(commandHandlerFunctor);
     commandHandlerFunctor->operator()(packetRx);
     return packetRx;
 }
index 7417946..f8b42df 100644 (file)
@@ -98,11 +98,11 @@ BOOST_AUTO_TEST_CASE(CounterCaptureHandlingTest)
     commandHandler(packet1);
     commandHandler(packet2);
 
-    BOOST_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
+    ARMNN_ASSERT(commandHandler.m_CurrentPeriodValue == 5000);
 
     for (size_t i = 0; i < commandHandler.m_CounterCaptureValues.m_Uids.size(); ++i)
     {
-        BOOST_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
+        ARMNN_ASSERT(commandHandler.m_CounterCaptureValues.m_Uids[i] == i);
     }
 }