IVGCVSW-1964: replace optional biases with home-grown Optional
author David Beck <david.beck@arm.com>
Thu, 4 Oct 2018 14:43:17 +0000 (15:43 +0100)
committer Matthew Bentham <matthew.bentham@arm.com>
Wed, 10 Oct 2018 15:16:58 +0000 (16:16 +0100)
!android-nn-driver:151788

Change-Id: Ibdc41d09b8df05e7a0360dcb8a060860dfb1bd99

24 files changed:
Android.mk
CMakeLists.txt
include/armnn/Exceptions.hpp
include/armnn/ILayerSupport.hpp
include/armnn/LayerSupport.hpp
include/armnn/Optional.hpp [new file with mode: 0644]
src/armnn/LayerSupport.cpp
src/armnn/test/OptionalTest.cpp [new file with mode: 0644]
src/backends/ILayerSupport.cpp
src/backends/WorkloadFactory.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClLayerSupport.hpp
src/backends/cl/workloads/ClConvolution2dWorkload.cpp
src/backends/cl/workloads/ClConvolution2dWorkload.hpp
src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.cpp
src/backends/cl/workloads/ClDepthwiseConvolutionBaseWorkload.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
src/backends/neon/workloads/NeonConvolution2dBaseWorkload.hpp
src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.cpp
src/backends/neon/workloads/NeonDepthwiseConvolutionBaseWorkload.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp
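
This change replaces boost::optional<TensorInfo> with the new armnn::Optional<TensorInfo> throughout the layer-support and workload-validation interfaces. A default-constructed Optional is empty (the role boost::none played before), and assigning a TensorInfo marks it as holding a value. Below is a minimal sketch of the new producer-side pattern, modelled on the WorkloadFactory.cpp hunks further down; MakeOptionalBiases is a hypothetical helper, not part of this change:

    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>

    armnn::Optional<armnn::TensorInfo> MakeOptionalBiases(bool biasEnabled,
                                                          const armnn::TensorInfo& biasInfo)
    {
        // Default construction leaves the Optional empty, replacing boost::optional(boost::none).
        armnn::Optional<armnn::TensorInfo> biases;
        if (biasEnabled)
        {
            // Copy-assignment constructs the TensorInfo in-place and sets has_value().
            biases = biasInfo;
        }
        return biases;
    }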

index d1810545254bc100ca0a719df789be6fd2f1c3e7..95b10b95335ac3f6f7a925c8badf20fec9227535 100644 (file)
@@ -193,6 +193,7 @@ LOCAL_SRC_FILES := \
        src/armnn/test/OpenClTimerTest.cpp \
        src/armnn/test/ProfilingEventTest.cpp \
        src/armnn/test/ObservableTest.cpp \
+       src/armnn/test/OptionalTest.cpp \
        src/backends/test/IsLayerSupportedTest.cpp \
        src/backends/test/Reference.cpp \
        src/backends/test/WorkloadDataValidation.cpp \
index 56b09356542e058f4e29103699e42ec6a8c62c44..f0fe5a15259c3099a8e3f23adb79302729ec59bb 100644 (file)
@@ -152,6 +152,7 @@ list(APPEND armnn_sources
     include/armnn/IRuntime.hpp
     include/armnn/ILayerSupport.hpp
     include/armnn/INetwork.hpp
+    include/armnn/Optional.hpp
     include/armnn/Tensor.hpp
     include/armnn/TensorFwd.hpp
     include/armnn/Types.hpp
@@ -376,6 +377,7 @@ if(BUILD_UNIT_TESTS)
         src/armnn/test/GraphUtils.hpp
         src/armnn/test/InstrumentTests.cpp
         src/armnn/test/ObservableTest.cpp
+        src/armnn/test/OptionalTest.cpp
         src/backends/test/IsLayerSupportedTest.cpp
         src/backends/test/IsLayerSupportedTestImpl.hpp
         src/backends/test/Reference.cpp
index 4f3bea0e3069208d0907f572d839f268d5030b3a..89b6f2cfbb53bc1856985950dbfa7c01c5310a48 100644 (file)
@@ -95,6 +95,11 @@ class GraphValidationException : public Exception
     using Exception::Exception;
 };
 
+class BadOptionalAccessException : public Exception
+{
+    using Exception::Exception;
+};
+
 template <typename ExceptionType>
 void ConditionalThrow(bool condition, const std::string& message)
 {
index 7962393f34d98af68e27bb6868ed00076a8bc73e..b9b41b7fcffa2362207548a4fe9a198f78c5a626 100644 (file)
@@ -5,9 +5,9 @@
 #pragma once
 
 #include <armnn/DescriptorsFwd.hpp>
-
-#include <boost/optional.hpp>
+#include <armnn/Optional.hpp>
 #include <vector>
+#include <cctype>
 
 namespace armnn
 {
@@ -61,7 +61,7 @@ public:
                                           const TensorInfo& output,
                                           const Convolution2dDescriptor& descriptor,
                                           const TensorInfo& weights,
-                                          const boost::optional<TensorInfo>& biases,
+                                          const Optional<TensorInfo>& biases,
                                           char* reasonIfUnsupported = nullptr,
                                           size_t reasonIfUnsupportedMaxLength = 1024) const;
 
@@ -69,7 +69,7 @@ public:
                                                  const TensorInfo& output,
                                                  const DepthwiseConvolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
-                                                 const boost::optional<TensorInfo>& biases,
+                                                 const Optional<TensorInfo>& biases,
                                                  char* reasonIfUnsupported = nullptr,
                                                  size_t reasonIfUnsupportedMaxLength = 1024) const;
 
index 25e888e71e7ea7dc6d64b265f0e84548369b14fd..31874fe944c1684ee476b8479f4fa637d51d53a1 100644 (file)
@@ -7,8 +7,7 @@
 #include <armnn/DescriptorsFwd.hpp>
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
-
-#include <boost/optional.hpp>
+#include <armnn/Optional.hpp>
 
 namespace armnn
 {
@@ -60,7 +59,7 @@ bool IsConvolution2dSupported(Compute compute,
                               const TensorInfo& output,
                               const Convolution2dDescriptor& descriptor,
                               const TensorInfo& weights,
-                              const boost::optional<TensorInfo>& biases,
+                              const Optional<TensorInfo>& biases,
                               char* reasonIfUnsupported = nullptr,
                               size_t reasonIfUnsupportedMaxLength = 1024);
 
@@ -69,7 +68,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
                                      const TensorInfo& output,
                                      const DepthwiseConvolution2dDescriptor& descriptor,
                                      const TensorInfo& weights,
-                                     const boost::optional<TensorInfo>& biases,
+                                     const Optional<TensorInfo>& biases,
                                      char* reasonIfUnsupported = nullptr,
                                      size_t reasonIfUnsupportedMaxLength = 1024);
 
diff --git a/include/armnn/Optional.hpp b/include/armnn/Optional.hpp
new file mode 100644 (file)
index 0000000..6fc207f
--- /dev/null
@@ -0,0 +1,123 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Exceptions.hpp"
+
+namespace armnn
+{
+
+// NOTE: the members of the Optional class don't follow the ArmNN
+//       coding convention because the interface is kept close to
+//       the C++17 std::optional interface so we can easily migrate
+//       to std::optional later.
+
+template <typename T>
+class Optional final
+{
+public:
+    Optional(T&& value)
+        : m_HasValue{true}
+    {
+        new (m_Storage) T(value);
+    }
+
+    Optional(const T& value)
+        : m_HasValue{true}
+    {
+        new (m_Storage) T(value);
+    }
+
+    Optional(const Optional& other)
+        : m_HasValue{false}
+    {
+        *this = other;
+    }
+
+    Optional() noexcept
+        : m_HasValue{false}
+    {
+    }
+
+    ~Optional()
+    {
+        reset();
+    }
+
+    operator bool() const noexcept
+    {
+        return has_value();
+    }
+
+    Optional& operator=(T&& value)
+    {
+        reset();
+        new (m_Storage) T(value);
+        m_HasValue = true;
+        return *this;
+    }
+
+    Optional& operator=(const T& value)
+    {
+        reset();
+        new(m_Storage) T(value);
+        m_HasValue = true;
+        return *this;
+    }
+
+    Optional& operator=(const Optional& other)
+    {
+        reset();
+        if (other.has_value())
+        {
+            new (m_Storage) T(other.value());
+            m_HasValue = true;
+        }
+
+        return *this;
+    }
+
+    const T& value() const
+    {
+        if (!has_value())
+        {
+            throw BadOptionalAccessException("Optional has no value");
+        }
+
+        auto valuePtr = reinterpret_cast<const T*>(m_Storage);
+        return *valuePtr;
+    }
+
+    T& value()
+    {
+        if (!has_value())
+        {
+            throw BadOptionalAccessException("Optional has no value");
+        }
+
+        auto valuePtr = reinterpret_cast<T*>(m_Storage);
+        return *valuePtr;
+    }
+
+    bool has_value() const noexcept
+    {
+        return m_HasValue;
+    }
+
+    void reset()
+    {
+        if (has_value())
+        {
+            value().T::~T();
+            m_HasValue = false;
+        }
+    }
+
+private:
+    alignas(alignof(T)) unsigned char m_Storage[sizeof(T)];
+    bool m_HasValue;
+};
+
+}
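
The new Optional stores its value in an aligned raw byte buffer inside the object: assignment placement-news a T into m_Storage, value() reinterprets that buffer, reset() invokes the destructor explicitly, and value() on an empty Optional throws the BadOptionalAccessException added to Exceptions.hpp above. A small usage sketch under those semantics (the function below is hypothetical, written only to illustrate the lifetime rules):

    #include <armnn/Exceptions.hpp>
    #include <armnn/Optional.hpp>
    #include <string>

    void OptionalLifetimeSketch()
    {
        armnn::Optional<std::string> name;          // empty: no std::string constructed yet
        name = std::string("bias");                 // placement-new into the internal storage
        if (name.has_value())
        {
            const std::string& ref = name.value();  // refers into m_Storage, valid until reset()
            (void)ref;
        }
        name.reset();                               // explicit destructor call; has_value() is now false

        try
        {
            name.value();                           // access while empty throws BadOptionalAccessException
        }
        catch (const armnn::BadOptionalAccessException&)
        {
        }
    }
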
index 9561136d85c51735cff5c6660c367443bdccdeb7..3758ed40f6fd211ea6c469105814113e395fe116 100644 (file)
@@ -134,7 +134,7 @@ bool IsConvolution2dSupported(Compute compute,
                               const TensorInfo& output,
                               const Convolution2dDescriptor& descriptor,
                               const TensorInfo& weights,
-                              const boost::optional<TensorInfo>& biases,
+                              const Optional<TensorInfo>& biases,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
 {
@@ -166,7 +166,7 @@ bool IsDepthwiseConvolutionSupported(Compute compute,
                                      const TensorInfo& output,
                                      const DepthwiseConvolution2dDescriptor& descriptor,
                                      const TensorInfo& weights,
-                                     const boost::optional<TensorInfo>& biases,
+                                     const Optional<TensorInfo>& biases,
                                      char* reasonIfUnsupported,
                                      size_t reasonIfUnsupportedMaxLength)
 {
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
new file mode 100644 (file)
index 0000000..1b5aaa7
--- /dev/null
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <boost/test/unit_test.hpp>
+
+#include <armnn/Optional.hpp>
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(OptionalTests)
+
+BOOST_AUTO_TEST_CASE(SimpleStringTests)
+{
+    armnn::Optional<std::string> optionalString;
+    BOOST_TEST(optionalString == false);
+    BOOST_TEST(optionalString.has_value() == false);
+
+    optionalString = std::string("Hello World");
+    BOOST_TEST(optionalString == true);
+    BOOST_TEST(optionalString.has_value() == true);
+    BOOST_TEST(optionalString.value() == "Hello World");
+
+    armnn::Optional<std::string> otherString;
+    otherString = optionalString;
+    BOOST_TEST(otherString == true);
+    BOOST_TEST(otherString.value() == "Hello World");
+
+    optionalString.reset();
+    BOOST_TEST(optionalString == false);
+    BOOST_TEST(optionalString.has_value() == false);
+
+    const std::string stringValue("Hello World");
+    armnn::Optional<std::string> optionalString2(stringValue);
+    BOOST_TEST(optionalString2 == true);
+    BOOST_TEST(optionalString2.has_value() == true);
+    BOOST_TEST(optionalString2.value() == "Hello World");
+
+    armnn::Optional<std::string> optionalString3(std::move(optionalString2));
+    BOOST_TEST(optionalString3 == true);
+    BOOST_TEST(optionalString3.has_value() == true);
+    BOOST_TEST(optionalString3.value() == "Hello World");
+}
+
+BOOST_AUTO_TEST_CASE(SimpleIntTests)
+{
+    const int intValue = 123;
+
+    armnn::Optional<int> optionalInt;
+    BOOST_TEST(optionalInt == false);
+    BOOST_TEST(optionalInt.has_value() == false);
+
+    optionalInt = intValue;
+    BOOST_TEST(optionalInt == true);
+    BOOST_TEST(optionalInt.has_value() == true);
+    BOOST_TEST(optionalInt.value() == intValue);
+
+    armnn::Optional<int> otherOptionalInt;
+    otherOptionalInt = optionalInt;
+    BOOST_TEST(otherOptionalInt == true);
+    BOOST_TEST(otherOptionalInt.value() == intValue);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
index c0446e93baf2c17f9afa74654e7b94af0bf5fd0c..ff4b80d22b947dce7f9b6dabed5b43a4f37e183e 100644 (file)
@@ -89,7 +89,7 @@ bool ILayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
-                                             const boost::optional<TensorInfo>& biases,
+                                             const Optional<TensorInfo>& biases,
                                              char* reasonIfUnsupported,
                                              size_t reasonIfUnsupportedMaxLength) const
 {
@@ -100,7 +100,7 @@ bool ILayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const boost::optional<TensorInfo>& biases,
+                                                    const Optional<TensorInfo>& biases,
                                                     char* reasonIfUnsupported,
                                                     size_t reasonIfUnsupportedMaxLength) const
 {
index dc9c1bc624f62ccd36be562da5368469ce691823..05919d6d9552d68d299d3489557413eec4f269b7 100644 (file)
@@ -144,11 +144,11 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute,
             const Convolution2dDescriptor& descriptor  = cLayer->GetParameters();
 
             // Construct optional biases object based on the value of m_BiasEnabled
-            boost::optional<TensorInfo> biases(boost::none);
+            Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                biases = boost::make_optional(
-                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
+                biases =
+                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
             }
 
             result = IsConvolution2dSupported(compute,
@@ -181,11 +181,11 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute,
             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
 
             // Construct optional biases object based on the value of m_BiasEnabled
-            boost::optional<TensorInfo> biases(boost::none);
+            Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                biases = boost::make_optional(
-                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType)));
+                biases =
+                    OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
             }
 
             result = IsDepthwiseConvolutionSupported(compute,
index e23c70ec302fda6e4b7eb99382bdcee72508acfa..09dfab9924df4acd1cbd0dad6a07a9df75345834 100644 (file)
@@ -212,7 +212,7 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
-                                const boost::optional<TensorInfo>& biases,
+                                const Optional<TensorInfo>& biases,
                                 std::string* reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
@@ -228,7 +228,7 @@ bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
-                                       const boost::optional<TensorInfo>& biases,
+                                       const Optional<TensorInfo>& biases,
                                        std::string* reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
index 80e8488b3ba7160b85c792e0bd134edf87df2f3f..314ac4c73f2ef91c08ddffd00fbdfbd7215cfbae 100644 (file)
@@ -9,8 +9,6 @@
 #include <armnn/Tensor.hpp>
 #include <armnn/ArmNN.hpp>
 
-#include <boost/optional.hpp>
-
 namespace armnn
 {
 
@@ -50,14 +48,14 @@ bool IsConvolution2dSupportedCl(const TensorInfo& input,
                                 const TensorInfo& output,
                                 const Convolution2dDescriptor& descriptor,
                                 const TensorInfo& weights,
-                                const boost::optional<TensorInfo>& biases,
+                                const Optional<TensorInfo>& biases,
                                 std::string* reasonIfUnsupported = nullptr);
 
 bool IsDepthwiseConvolutionSupportedCl(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const DepthwiseConvolution2dDescriptor& descriptor,
                                        const TensorInfo& weights,
-                                       const boost::optional<TensorInfo>& biases,
+                                       const Optional<TensorInfo>& biases,
                                        std::string* reasonIfUnsupported = nullptr);
 
 bool IsDivisionSupportedCl(const TensorInfo& input0,
index 521711becc8126ed58ac2a3ddf3ba668f0a4edbe..301859ee1b0fb9d4b6e52eb63dcc7e34cd4409f5 100644 (file)
@@ -24,7 +24,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const boost::optional<TensorInfo>& biases)
+                                                    const Optional<TensorInfo>& biases)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -35,9 +35,9 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.is_initialized());
+        BOOST_ASSERT(biases.has_value());
 
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
index 14a39f3b25a011f1e0a1fcfbd6134011f43f33cf..a5de87639b499cfc3ef323d4a1dc53228e5dfd87 100644 (file)
@@ -13,8 +13,6 @@
 #include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
 #include <arm_compute/runtime/MemoryManagerOnDemand.h>
 
-#include <boost/optional.hpp>
-
 #include <memory>
 
 namespace armnn
@@ -24,7 +22,7 @@ arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const Convolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
-                                                    const boost::optional<TensorInfo>& biases);
+                                                    const Optional<TensorInfo>& biases);
 
 class ClConvolution2dWorkload : public BaseWorkload<Convolution2dQueueDescriptor>
 {
index 5a036db9225911f799c6a8035395d1afed916625..53ac3bae99e062b2c85ac2bd686dbc7a1d0e1e7b 100644 (file)
@@ -21,7 +21,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
     const TensorInfo& output,
     const DepthwiseConvolution2dDescriptor& descriptor,
     const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases)
+    const Optional<TensorInfo>& biases)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -32,9 +32,9 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.is_initialized());
+        BOOST_ASSERT(biases.has_value());
 
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
index 9d5cde30b63eea5575111f65b0bb893af27487ba..27aec8ecddd520205c00dc8e7d18e7a013b30e04 100644 (file)
@@ -6,7 +6,6 @@
 #pragma once
 
 #include <backends/Workload.hpp>
-#include <boost/optional.hpp>
 
 #include <arm_compute/runtime/CL/CLFunctions.h>
 
@@ -17,7 +16,7 @@ arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& inp
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
-                                                           const boost::optional<TensorInfo>& biases);
+                                                           const Optional<TensorInfo>& biases);
 
 template<armnn::DataType... dataTypes>
 class ClDepthwiseConvolutionBaseWorkload : public TypedWorkload<DepthwiseConvolution2dQueueDescriptor, dataTypes...>
index bc1f96be3f47f6386755f3a3ee9c3408e8b826e6..ef70fbd37058d0f711a319787c33ed84e0346033 100644 (file)
@@ -198,7 +198,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution2dDescriptor& descriptor,
                                   const TensorInfo& weights,
-                                  const boost::optional<TensorInfo>& biases,
+                                  const Optional<TensorInfo>& biases,
                                   std::string* reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
@@ -214,7 +214,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                          const TensorInfo& weights,
-                                         const boost::optional<TensorInfo>& biases,
+                                         const Optional<TensorInfo>& biases,
                                          std::string* reasonIfUnsupported)
 {
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
index 439c45f59efda7a283c0afff56f0ecf5cf1023b6..8b674c6460bfa35e1fa6a30c1f1efa632dcb89eb 100644 (file)
@@ -8,8 +8,6 @@
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 
-#include <boost/optional.hpp>
-
 namespace armnn
 {
 
@@ -53,7 +51,7 @@ bool IsConvolution2dSupportedNeon(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const Convolution2dDescriptor& descriptor,
                                   const TensorInfo& weights,
-                                  const boost::optional<TensorInfo>& biases,
+                                  const Optional<TensorInfo>& biases,
                                   std::string* reasonIfUnsupported = nullptr);
 
 
@@ -61,7 +59,7 @@ bool IsDepthwiseConvolutionSupportedNeon(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const DepthwiseConvolution2dDescriptor& descriptor,
                                          const TensorInfo& weights,
-                                         const boost::optional<TensorInfo>& biases,
+                                         const Optional<TensorInfo>& biases,
                                          std::string* reasonIfUnsupported = nullptr);
 
 bool IsDivisionSupportedNeon(const TensorInfo& input0,
index 02edabfd9cd9647e5468c720421f746185f2a798..8da3e472491a94312175733dfca5ff35573ff5e7 100644 (file)
@@ -21,7 +21,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
     const TensorInfo& output,
     const Convolution2dDescriptor& descriptor,
     const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases)
+    const Optional<TensorInfo>& biases)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -32,9 +32,9 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.is_initialized());
+        BOOST_ASSERT(biases.has_value());
 
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
index 6af89c1f014f429f8cc91c4016a589c1f7847880..1cd30c70f96e023497ce0cef33ddd020678879e0 100644 (file)
@@ -13,8 +13,6 @@
 
 #include <arm_compute/runtime/MemoryManagerOnDemand.h>
 
-#include <boost/optional.hpp>
-
 #include <memory>
 
 namespace armnn
@@ -24,7 +22,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
     const TensorInfo& output,
     const Convolution2dDescriptor& descriptor,
     const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases);
+    const Optional<TensorInfo>& biases);
 
 template<armnn::DataType... dataTypes>
 class NeonConvolution2dBaseWorkload : public TypedWorkload<Convolution2dQueueDescriptor, dataTypes...>
index ef60b3238d1a5ca4dd119af751d8af52585438d0..aa535adec987ffc1f1c7c53a25e87df877a2f290 100644 (file)
@@ -14,7 +14,7 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
     const TensorInfo& output,
     const DepthwiseConvolution2dDescriptor& descriptor,
     const TensorInfo& weights,
-    const boost::optional<TensorInfo>& biases)
+    const Optional<TensorInfo>& biases)
 {
     const arm_compute::TensorInfo aclInputInfo =
         armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
@@ -28,9 +28,9 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
 
     if (descriptor.m_BiasEnabled)
     {
-        BOOST_ASSERT(biases.is_initialized());
+        BOOST_ASSERT(biases.has_value());
 
-        aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.get(), descriptor.m_DataLayout);
+        aclBiasesInfo = armcomputetensorutils::BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
index 982992a3639135860dc57b6b551ac1cf741afd9a..ffee50861ac9fbb63572120f748aabcb9a87054d 100644 (file)
@@ -7,8 +7,6 @@
 
 #include <backends/neon/workloads/NeonWorkloadUtils.hpp>
 
-#include <boost/optional.hpp>
-
 namespace armnn
 {
 
@@ -16,6 +14,6 @@ arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& i
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
-                                                             const boost::optional<TensorInfo>& biases);
+                                                             const Optional<TensorInfo>& biases);
 
 } // namespace armnn
index 1ca3d5b6d642201068b989a3daedb7b42f50ffc9..a42efb748f6e7d632d438acf191cd56974000617 100644 (file)
@@ -87,7 +87,7 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const Convolution2dDescriptor& descriptor,
                                  const TensorInfo& weights,
-                                 const boost::optional<TensorInfo>& biases,
+                                 const Optional<TensorInfo>& biases,
                                  std::string* reasonIfUnsupported)
 {
     ignore_unused(descriptor);
@@ -104,7 +104,7 @@ bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const DepthwiseConvolution2dDescriptor& descriptor,
                                         const TensorInfo& weights,
-                                        const boost::optional<TensorInfo>& biases,
+                                        const Optional<TensorInfo>& biases,
                                         std::string* reasonIfUnsupported)
 {
     ignore_unused(output);
index 0fac886234e3eea3710899f203af7f18885d62ac..dcc5dd3ddff918063d944ffa400108b888097f70 100644 (file)
@@ -8,9 +8,6 @@
 #include <armnn/Types.hpp>
 #include <armnn/Tensor.hpp>
 #include <layers/LstmLayer.hpp>
-#include <boost/optional.hpp>
-
-#include <boost/optional.hpp>
 
 namespace armnn
 {
@@ -46,14 +43,14 @@ bool IsConvolution2dSupportedRef(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const Convolution2dDescriptor& descriptor,
                                  const TensorInfo& weights,
-                                 const boost::optional<TensorInfo>& biases,
+                                 const Optional<TensorInfo>& biases,
                                  std::string* reasonIfUnsupported = nullptr);
 
 bool IsDepthwiseConvolutionSupportedRef(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const DepthwiseConvolution2dDescriptor& descriptor,
                                         const TensorInfo& weights,
-                                        const boost::optional<TensorInfo>& biases,
+                                        const Optional<TensorInfo>& biases,
                                         std::string* reasonIfUnsupported = nullptr);
 
 bool IsDivisionSupportedRef(const TensorInfo& input0,