-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
**/
}
+
-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
## Although some other neural networks might work, Arm tests the Arm NN SDK with Caffe implementations of the following neural networks:
- AlexNet.
+- Cifar10.
- Inception-BN.
- Resnet_50, Resnet_101 and Resnet_152.
- VGG_CNN_S, VGG_16 and VGG_19.
- Lenet.
- MobileNetv1.
-using these datasets:
-- Cifar10.
-
## The Arm NN SDK supports the following machine learning layers for Caffe networks:
- BatchNorm, in inference mode.
- Add
- See the ONNX [Add documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Add) for more information
--AveragePool
+- AveragePool
- See the ONNX [AveragePool documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#AveragePool) for more information.
- Constant
- See the ONNX [Constant documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Constant) for more information.
- placeholder
- See the TensorFlow [placeholder documentation](https://www.tensorflow.org/api_docs/python/tf/placeholder) for more information.
- reduce_mean
- -See the TensorFlow [reduce_mean documentation](https://www.tensorflow.org/api_docs/python/tf/reduce_mean) for more information.
+ - See the TensorFlow [reduce_mean documentation](https://www.tensorflow.org/api_docs/python/tf/reduce_mean) for more information.
- relu
- See the TensorFlow [relu documentation](https://www.tensorflow.org/api_docs/python/tf/nn/relu) for more information.
- relu6
**/
}
+
-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
At the end of the test, the runtime object goes out of scope, the dynamic backend instance is automatically destroyed, and the handle to
the shared object is closed.
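To make that lifetime concrete, here is a minimal sketch (not taken from the test code) that creates a runtime pointing at a dynamic backend path and lets it go out of scope; the path and function name are hypothetical:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <armnn/ArmNN.hpp>

void RunWithDynamicBackends()
{
    armnn::IRuntime::CreationOptions options;
    options.m_DynamicBackendsPath = "/path/to/dynamic/backends"; // hypothetical location

    {
        // Creating the runtime scans the path and opens the dynamic backend shared objects.
        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
        // ... load and run networks ...
    }   // runtime goes out of scope here: the dynamic backend instances are destroyed
        // and the handles to the shared objects are closed
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~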
-<br/><br/><br/><br/><br/>
+<br/><br/><br/><br/>
@section S13_dynamic_backend_guide Standalone Dynamic Backend Developer Guide
-/// Copyright (c) 2017 ARM Limited.
+/// Copyright (c) 2020 ARM Limited.
///
/// SPDX-License-Identifier: MIT
///
# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
# the logo to the output directory.
-PROJECT_LOGO =
+PROJECT_LOGO = ./docs/Arm_NN_horizontal_blue.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_HEADER =
+HTML_HEADER = ./docs/header.xhtml
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
--- /dev/null
+<!-- Copyright (c) 2020 ARM Limited. -->
+<!-- -->
+<!-- SPDX-License-Identifier: MIT -->
+<!-- -->
+<!-- HTML header for doxygen 1.8.13-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen $doxygenversion"/>
+<meta name="robots" content="NOINDEX, NOFOLLOW" />
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
+<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
+<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
+$treeview
+$search
+$mathjax
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+$extrastylesheet
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+
+<!--BEGIN TITLEAREA-->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+ <img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 10rem; margin-top: .5rem; margin-left 10px"/>
+ <!--BEGIN PROJECT_NAME-->
+ <td style="padding-left: 0.5em;">
+ <div id="projectname">
+ <!--BEGIN PROJECT_NUMBER--> <span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
+ </div>
+ <!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF-->
+ </td>
+ <!--END PROJECT_NAME-->
+ <!--BEGIN !PROJECT_NAME-->
+ <!--END !PROJECT_NAME-->
+ <!--BEGIN DISABLE_INDEX-->
+ <!--BEGIN SEARCHENGINE-->
+ <td>$searchbox</td>
+ <!--END SEARCHENGINE-->
+ <!--END DISABLE_INDEX-->
+ </tr>
+ </tbody>
+</table>
+</div>
+<!--END TITLEAREA-->
+<!-- end header part -->
+/* Copyright (c) 2020 ARM Limited. */
/* Changes to tabs.css */
.tabs, .tabs2, .tabs3 {
- /* box-shadow: 0px 5px 30px rgba(0, 0, 0, 0.3); */
position: relative;
}
div.fragment {
background-color: #FCFCFC;
border: 1px solid #CFCFCF;
+ padding: 8px;
+ margin: 10px 2px;
}
div.line.glow {
background-color: #007fa3;
}
-.memitem.glow {
- /* box-shadow: 0 0 15px orange; */
-}
-
.memproto, dl.reflist dt {
border-top: 1px solid #B8B8B8;
border-left: 1px solid #B8B8B8;
border-color: #f68a33;
}
-#projectlogo
-{
- width:150px;
- text-align:left;
-}
-
#projectname
{
font: 200% Tahoma, Arial,sans-serif;
namespace armnn
{
-//
-// The Compute enum is now deprecated and it is now
-// being replaced by BackendId
-//
+///
+/// The Compute enum is now deprecated and is being replaced by BackendId.
+///
enum class Compute
{
Undefined = 0,
return m_Id == other.m_Id;
}
- // comparison against objects from which the
- // BackendId can be constructed
+ /// comparison against objects from which the
+ /// BackendId can be constructed
template <typename O>
bool operator==(const O& other) const
{
namespace std
{
-// make BackendId compatible with std hashtables by reusing the hash
-// function for strings.
-// Note this must come *before* the first use of unordered_set<BackendId>.
+/// make BackendId compatible with std hashtables by reusing the hash
+/// function for strings.
+/// Note this must come *before* the first use of unordered_set<BackendId>.
template <>
struct hash<armnn::BackendId>
{
};
public:
- // Very basic type safe variant
+    /// Very basic type-safe variant.
class Var
{
explicit Var(const char* s) : m_Vals(s), m_Type(VarTypes::String) {};
explicit Var(std::string s) : m_Vals(s), m_Type(VarTypes::String) {};
- //Disallow implicit conversions from types not explicitly allowed below.
+ /// Disallow implicit conversions from types not explicitly allowed below.
template<typename DisallowedType>
Var(DisallowedType)
{
String,
};
- // Union of potential type values.
+ /// Union of potential type values.
union Vals
{
int i;
protected:
using FactoryStorage = std::unordered_map<BackendId, FactoryFunction>;
- // For testing only
+ /// For testing only
static void Swap(BackendRegistry& instance, FactoryStorage& other);
private:
using ConcatDescriptor = OriginsDescriptor;
using DepthToSpaceDescriptor = SpaceToDepthDescriptor;
using LogSoftmaxDescriptor = SoftmaxDescriptor;
-// MergerDescriptor is deprecated, use ConcatDescriptor instead
+/// MergerDescriptor is deprecated, use ConcatDescriptor instead
using MergerDescriptor = OriginsDescriptor;
using SplitterDescriptor = ViewsDescriptor;
public:
explicit Exception(const std::string& message);
- // exception with context
+ /// exception with context
explicit Exception(const std::string& message,
const CheckLocation& location);
- // preserving previous exception context
- // and adding local context information
+ /// preserving previous exception context
+ /// and adding local context information
explicit Exception(const Exception& other,
const std::string& message,
const CheckLocation& location);
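A brief sketch of how these constructors can add local context when rethrowing; it assumes the CHECK_LOCATION() helper macro is available and uses a hypothetical LoadDemoNetwork() operation:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <armnn/Exceptions.hpp>

void LoadDemoNetwork(); // hypothetical operation that may throw armnn::Exception

void LoadWithContext()
{
    try
    {
        LoadDemoNetwork();
    }
    catch (const armnn::Exception& e)
    {
        // Wrap the original exception, preserving its context and adding this location.
        throw armnn::Exception(e, "while loading the demo network", CHECK_LOCATION());
    }
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~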
/// It will also be updated with new tuned parameters if it is configured to do so.
std::shared_ptr<IGpuAccTunedParameters> m_GpuAccTunedParameters;
- // Setting this flag will allow the user to obtain GPU profiling information from the runtime.
+ /// Setting this flag will allow the user to obtain GPU profiling information from the runtime.
bool m_EnableGpuProfiling;
- // Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive
- // Only a single path is allowed for the override
+    /// Setting this value will override the paths set by the DYNAMIC_BACKEND_PATHS compiler directive.
+    /// Only a single path is allowed for the override.
std::string m_DynamicBackendsPath;
struct ExternalProfilingOptions
/// Pass backend specific options.
///
/// For example, to enable GpuAcc tuning add the following
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
/// m_BackendOption.emplace_back(
/// BackendOptions{"GpuAcc",
/// {
/// {"TuningFile", filename}
/// }
/// });
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Execute representative workloads through the runtime to generate tuning data.
/// The tuning file is written once the runtime is destroyed
/// To execute with the tuning data, start up with just the tuning file specified.
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
/// m_BackendOption.emplace_back(
/// BackendOptions{"GpuAcc",
/// {
/// {"TuningFile", filename}
/// }
/// });
+ /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// The following backend options are available:
/// GpuAcc:
static void Apply(const std::string&) {}
};
-// Visitor base class with empty implementations.
+/// Visitor base class with empty implementations.
template<typename DefaultPolicy>
class LayerVisitorBase : public ILayerVisitor
{
#include <cstring>
#include <type_traits>
-// Optional is a drop in replacement for std::optional until we migrate
-// to c++-17. Only a subset of the optional features are implemented that
-// we intend to use in ArmNN.
+/// Optional is a drop-in replacement for std::optional until we migrate
+/// to C++17. Only the subset of std::optional features that we intend to
+/// use in ArmNN is implemented.
-// There are two distinct implementations here:
-//
-// 1, for normal constructable/destructable types and reference types
-// 2, for reference types
+/// There are two distinct implementations here:
+///
+/// 1. for normal constructible/destructible types
+/// 2. for reference types
-// The std::optional features we support are:
-//
-// - has_value() and operator bool() to tell if the optional has a value
-// - value() returns a reference to the held object
-//
+/// The std::optional features we support are:
+///
+/// - has_value() and operator bool() to tell if the optional has a value
+/// - value() returns a reference to the held object
+///
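A minimal usage sketch of the interface listed above (the function and values are hypothetical); EmptyOptional, introduced below, provides the "no value" state:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <armnn/Optional.hpp>
#include <iostream>

armnn::Optional<int> TryParse(bool ok)
{
    if (ok)
    {
        return armnn::Optional<int>(42); // holds a value
    }
    return armnn::EmptyOptional();       // no value
}

int main()
{
    auto result = TryParse(true);
    if (result.has_value())              // or simply: if (result)
    {
        std::cout << result.value() << std::endl; // prints 42
    }
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~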
namespace armnn
{
-// EmptyOptional is used to initialize the Optional class in case we want
-// to have default value for an Optional in a function declaration.
+/// EmptyOptional is used to initialize the Optional class in case we want
+/// to have a default value for an Optional in a function declaration.
struct EmptyOptional {};
-// Disambiguation tag that can be passed to the constructor to indicate that
-// the contained object should be constructed in-place
+/// Disambiguation tag that can be passed to the constructor to indicate that
+/// the contained object should be constructed in-place
struct ConstructInPlace
{
explicit ConstructInPlace() = default;
#define CONSTRUCT_IN_PLACE armnn::ConstructInPlace{}
-// OptionalBase is the common functionality between reference and non-reference
-// optional types.
+/// OptionalBase is the common functionality between reference and non-reference
+/// optional types.
class OptionalBase
{
public:
bool m_HasValue;
};
-//
-// The default implementation is the non-reference case. This
-// has an unsigned char array for storing the optional value which
-// is in-place constructed there.
-//
+///
+/// The default implementation is the non-reference case. This
+/// has an unsigned char array for storing the optional value which
+/// is in-place constructed there.
+///
template <bool IsReference, typename T>
class OptionalReferenceSwitch : public OptionalBase
{
alignas(alignof(T)) unsigned char m_Storage[sizeof(T)];
};
-//
-// This is the special case for reference types. This holds a pointer
-// to the referenced type. This doesn't own the referenced memory and
-// it never calls delete on the pointer.
-//
+///
+/// This is the special case for reference types. This holds a pointer
+/// to the referenced type. This doesn't own the referenced memory and
+/// it never calls delete on the pointer.
+///
template <typename T>
class OptionalReferenceSwitch<true, T> : public OptionalBase
{
}
};
-// Utility template that constructs an object of type T in-place and wraps
-// it inside an Optional<T> object
+/// Utility template that constructs an object of type T in-place and wraps
+/// it inside an Optional<T> object
template<typename T, class... Args>
Optional<T> MakeOptional(Args&&... args)
{
MemoryType GetMemoryArea() const { return m_MemoryArea; }
protected:
- // Protected destructor to stop users from making these
- // (could still new one on the heap and then leak it...)
+ /// Protected destructor to stop users from making these
+ /// (could still new one on the heap and then leak it...)
~BaseTensor() {}
MemoryType m_MemoryArea;
constexpr unsigned int MaxNumOfTensorDimensions = 5U;
-// The lowest performance data capture interval we support is 10 miliseconds.
+/// The lowest performance data capture interval we support is 10 milliseconds (10000 microseconds).
constexpr unsigned int LOWEST_CAPTURE_PERIOD = 10000u;
/// @enum Status enumeration
namespace std
{
-// make ProfilingGuid hashable
+/// make ProfilingGuid hashable
template<>
struct hash<armnn::profiling::ProfilingGuid>
{
}
};
-// make ProfilingDynamicGuid hashable
+/// make ProfilingDynamicGuid hashable
template<>
struct hash<armnn::profiling::ProfilingDynamicGuid>
{
}
};
-// make ProfilingStaticGuid hashable
+/// make ProfilingStaticGuid hashable
template<>
struct hash<armnn::profiling::ProfilingStaticGuid>
{
#pragma once
-// Macro utils
+/// Macro utils
#define STRINGIFY_VALUE(s) STRINGIFY_MACRO(s)
#define STRINGIFY_MACRO(s) #s
#define CONCAT_VALUE(a, b, c) CONCAT_MACRO(a, b, c)
#define ARMNN_MINOR_VERSION 02
#define ARMNN_PATCH_VERSION 00
-// ARMNN_VERSION: "YYYYMMPP"
-// where:
-// YYYY = 4-digit year number
-// MM = 2-digit month number
-// PP = 2-digit patch number
+/// ARMNN_VERSION: "YYYYMMPP"
+/// where:
+/// YYYY = 4-digit year number
+/// MM = 2-digit month number
+/// PP = 2-digit patch number
#define ARMNN_VERSION "20" STRINGIFY_VALUE(CONCAT_VALUE(ARMNN_MAJOR_VERSION, ARMNN_MINOR_VERSION, ARMNN_PATCH_VERSION))
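A standalone sketch (mirroring, not reusing, the ArmNN macros) of how the "YYYYMMPP" string is assembled; the major version of 20 and the CONCAT_MACRO body are assumptions, since neither appears in this hunk:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <iostream>

#define STRINGIFY_MACRO(s) #s
#define STRINGIFY_VALUE(s) STRINGIFY_MACRO(s)
#define CONCAT_MACRO(a, b, c) a##b##c        // assumed token-pasting implementation
#define CONCAT_VALUE(a, b, c) CONCAT_MACRO(a, b, c)

#define MAJOR 20   // hypothetical 2-digit major version
#define MINOR 02
#define PATCH 00

int main()
{
    // "20" is the fixed century prefix; the rest comes from pasting the version parts:
    // CONCAT_VALUE(20, 02, 00) -> 200200 -> "200200" -> "20" "200200" == "20200200"
    std::cout << "20" STRINGIFY_VALUE(CONCAT_VALUE(MAJOR, MINOR, PATCH)) << std::endl;
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~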
IBackendContext(const IRuntime::CreationOptions&) {}
public:
- // Before and after Load network events
+ /// Before and after Load network events
virtual bool BeforeLoadNetwork(NetworkId networkId) = 0;
virtual bool AfterLoadNetwork(NetworkId networkId) = 0;
- // Before and after Unload network events
+ /// Before and after Unload network events
virtual bool BeforeUnloadNetwork(NetworkId networkId) = 0;
virtual bool AfterUnloadNetwork(NetworkId networkId) = 0;
class IBackendInternal : public IBackend
{
protected:
- // Creation must be done through a specific
- // backend interface.
+ /// Creation must be done through a specific
+ /// backend interface.
IBackendInternal() = default;
public:
- // Allow backends created by the factory function
- // to be destroyed through IBackendInternal.
+ /// Allow backends created by the factory function
+ /// to be destroyed through IBackendInternal.
~IBackendInternal() override = default;
using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
using IBackendContextPtr = std::unique_ptr<IBackendContext>;
- // This is the bridge between backend and backend profiling we'll keep it in the backend namespace.
+    /// This is the bridge between backend and backend profiling; we'll keep it in the backend namespace.
using IBackendProfilingContextPtr = std::shared_ptr<armnn::profiling::IBackendProfilingContext>;
using IBackendProfilingPtr = std::unique_ptr<armnn::profiling::IBackendProfiling>;
using OptimizationPtr = std::unique_ptr<Optimization>;
/// \return a TensorShape filled with the number of elements for each dimension.
virtual TensorShape GetShape() const = 0;
- // Testing support to be able to verify and set tensor data content
+ /// Testing support to be able to verify and set tensor data content
virtual void CopyOutTo(void* memory) const = 0;
virtual void CopyInFrom(const void* memory) = 0;
{
public:
using FactoryId = std::string;
- static const FactoryId LegacyFactoryId; // Use the workload factory to create the tensor handle
- static const FactoryId DeferredFactoryId; // Some TensorHandleFactory decisions are deferred to run-time
+    static const FactoryId LegacyFactoryId;   ///< Use the workload factory to create the tensor handle
+    static const FactoryId DeferredFactoryId; ///< Some TensorHandleFactory decisions are deferred to run-time
virtual ~ITensorHandleFactory() {}
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const = 0;
- // Utility Functions for backends which require TensorHandles to have unmanaged memory.
- // These should be overloaded if required to facilitate direct import of input tensors
- // and direct export of output tensors.
+ /// Utility Functions for backends which require TensorHandles to have unmanaged memory.
+ /// These should be overloaded if required to facilitate direct import of input tensors
+ /// and direct export of output tensors.
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
namespace armnnUtils
{
-// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout
+/// Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout
class DataLayoutIndexed
{
public:
BOOST_ASSERT( widthIndex < shape[m_WidthIndex] ||
( shape[m_WidthIndex] == 0 && widthIndex == 0) );
- // Offset the given indices appropriately depending on the data layout
+ /// Offset the given indices appropriately depending on the data layout
switch (m_DataLayout)
{
case armnn::DataLayout::NHWC:
batchIndex *= shape[1] * shape[2] * shape[3]; // batchIndex *= heightIndex * widthIndex * channelIndex
heightIndex *= shape[m_WidthIndex] * shape[m_ChannelsIndex];
widthIndex *= shape[m_ChannelsIndex];
- // channelIndex stays unchanged
+ /// channelIndex stays unchanged
break;
case armnn::DataLayout::NCHW:
default:
batchIndex *= shape[1] * shape[2] * shape[3]; // batchIndex *= heightIndex * widthIndex * channelIndex
channelIndex *= shape[m_HeightIndex] * shape[m_WidthIndex];
heightIndex *= shape[m_WidthIndex];
- // widthIndex stays unchanged
+ /// widthIndex stays unchanged
break;
}
- // Get the value using the correct offset
+ /// Get the value using the correct offset
return batchIndex + channelIndex + heightIndex + widthIndex;
}
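A standalone sketch (plain arithmetic, not the ArmNN API) that mirrors the NHWC branch above for a hypothetical shape and set of coordinates:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <iostream>

int main()
{
    // Hypothetical NHWC shape [N=1, H=4, W=4, C=3] and element (n=0, h=1, w=3, c=2).
    unsigned int H = 4, W = 4, C = 3;
    unsigned int n = 0, h = 1, w = 3, c = 2;

    // Same scaling as the NHWC case above: batch by H*W*C, height by W*C, width by C.
    unsigned int offset = n * (H * W * C) + h * (W * C) + w * C + c;
    std::cout << offset << std::endl; // prints 23
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~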
unsigned int m_WidthIndex;
};
-// Equality methods
+/// Equality methods
bool operator==(const armnn::DataLayout& dataLayout, const DataLayoutIndexed& indexed);
bool operator==(const DataLayoutIndexed& indexed, const armnn::DataLayout& dataLayout);
class FloatingPointConverter
{
public:
- // Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.
- // dstFloat16Buffer should be (numElements * 2) in size
+    /// Converts a buffer of FP32 values to FP16, and stores them in the given dstFloat16Buffer.
+    /// dstFloat16Buffer should be at least (numElements * 2) bytes in size.
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer);
static void ConvertFloat16To32(const void *srcFloat16Buffer, size_t numElements, float *dstFloat32Buffer);
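A hedged usage sketch of the conversion described above; the header path is an assumption, and uint16_t is used here only as 2-byte storage for the FP16 results:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.cpp
#include <armnnUtils/FloatingPointConverter.hpp> // assumed header location
#include <cstdint>
#include <vector>

int main()
{
    std::vector<float>    src = { 1.0f, 0.5f, -2.0f };
    std::vector<uint16_t> dst(src.size()); // each FP16 value occupies 2 bytes

    // Convert the three FP32 values into the (numElements * 2)-byte destination buffer.
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(src.data(), src.size(), dst.data());
}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~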