src/armnn/Profiling.cpp \
src/armnn/JsonPrinter.cpp \
src/armnn/Tensor.cpp \
+ src/armnn/TypesUtils.cpp \
src/armnn/Utils.cpp \
src/armnn/LayerSupport.cpp \
src/armnn/Observable.cpp
src/armnn/Runtime.hpp
src/armnn/RangeTracker.cpp
src/armnn/RangeTracker.hpp
+ src/armnn/ResolveType.hpp
src/armnn/SerializeLayerParameters.cpp
src/armnn/SerializeLayerParameters.hpp
src/armnn/StaticRangeVisitor.cpp
src/armnn/SubGraphSelector.cpp
src/armnn/SubGraphSelector.hpp
src/armnn/Tensor.cpp
- src/armnn/TypeUtils.hpp
+ src/armnn/TypesUtils.cpp
src/armnn/Utils.cpp
src/armnn/WallClockTimer.cpp
src/armnn/WallClockTimer.hpp
#include "Tensor.hpp"
#include "Types.hpp"
-#include <boost/assert.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <ostream>
#include <set>
/// @return - The quantized value calculated as round(value/scale)+offset.
///
template<typename QuantizedType>
-inline QuantizedType Quantize(float value, float scale, int32_t offset)
-{
- static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
- constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
-
- float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
- static_cast<float>(max));
- auto quantizedBits = static_cast<QuantizedType>(clampedValue);
-
- return quantizedBits;
-}
+QuantizedType Quantize(float value, float scale, int32_t offset);
/// Dequantize an 8-bit data type into a floating point data type.
/// @param value - The value to dequantize.
/// @return - The dequantized value calculated as (value-offset)*scale.
///
template <typename QuantizedType>
-inline float Dequantize(QuantizedType value, float scale, int32_t offset)
-{
- static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
- float dequantized = boost::numeric_cast<float>(value - offset) * scale;
- return dequantized;
-}
+float Dequantize(QuantizedType value, float scale, int32_t offset);
inline void VerifyTensorInfoDataType(const armnn::TensorInfo & info, armnn::DataType dataType)
{
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include <armnn/TypesUtils.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+template<typename QuantizedType>
+QuantizedType armnn::Quantize(float value, float scale, int32_t offset)
+{
+ static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
+ constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
+ constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
+ BOOST_ASSERT(scale != 0.f);
+ BOOST_ASSERT(!std::isnan(value));
+
+ float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
+ static_cast<float>(max));
+ auto quantizedBits = static_cast<QuantizedType>(clampedValue);
+
+ return quantizedBits;
+}
+
+template <typename QuantizedType>
+float armnn::Dequantize(QuantizedType value, float scale, int32_t offset)
+{
+ static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
+ BOOST_ASSERT(scale != 0.f);
+ BOOST_ASSERT(!std::isnan(value));
+ float dequantized = boost::numeric_cast<float>(value - offset) * scale;
+ return dequantized;
+}
+
+/// Explicit instantiation of Quantize for uint8_t
+template
+uint8_t armnn::Quantize<uint8_t>(float value, float scale, int32_t offset);
+
+/// Explicit instantiation of Quantize for int16_t
+template
+int16_t armnn::Quantize<int16_t>(float value, float scale, int32_t offset);
+
+/// Explicit instantiation of Quantize for int32_t
+template
+int32_t armnn::Quantize<int32_t>(float value, float scale, int32_t offset);
+
+/// Explicit instantiation of Dequantize for uint8_t
+template
+float armnn::Dequantize<uint8_t>(uint8_t value, float scale, int32_t offset);
+
+/// Explicit instantiation of Dequantize for int16_t
+template
+float armnn::Dequantize<int16_t>(int16_t value, float scale, int32_t offset);
+
+/// Explicit instantiation of Dequantize for int32_t
+template
+float armnn::Dequantize<int32_t>(int32_t value, float scale, int32_t offset);
\ No newline at end of file
#include <Graph.hpp>
#include <DataLayoutIndexed.hpp>
#include <Network.hpp>
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
#include <utility>
#include <armnn/Descriptors.hpp>
#include <GraphTopologicalSort.hpp>
#include <Graph.hpp>
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
BOOST_AUTO_TEST_SUITE(Utils)
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
+#include <boost/format.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>
// The generated code based on the Serialize schema:
#include <boost/assert.hpp>
#include <boost/format.hpp>
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "test/TensorHelpers.hpp"
#include "flatbuffers/idl.h"
#include <armnn/Utils.hpp>
#include <VerificationHelpers.hpp>
+#include <boost/format.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include <boost/format.hpp>
-
#include <numeric>
using namespace armnn;
#include <ArmnnSchema_generated.h>
+#include <boost/numeric/conversion/cast.hpp>
+
#include <flatbuffers/util.h>
using namespace armnn;
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/log/trivial.hpp>
+#include <boost/format.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <fstream>
#include <algorithm>
#include "test/TensorHelpers.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <backendsCommon/BackendRegistry.hpp>
#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
+#include <boost/format.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>
#include <numeric>
//
#pragma once
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include "CpuTensorHandle.hpp"
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
#include <boost/cast.hpp>
#include <sstream>
#include <boost/format.hpp>
+#include <boost/numeric/conversion/cast.hpp>
using namespace armnnUtils;
//
#pragma once
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
//
#pragma once
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include <Permute.hpp>
#include <DataLayoutIndexed.hpp>
#pragma once
#include "TensorCopyUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"
#include <armnn/Types.hpp>
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
namespace{
//
#pragma once
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
// SPDX-License-Identifier: MIT
//
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"
#include <backendsCommon/IBackendInternal.hpp>
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
namespace{
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
//
#pragma once
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
#include <initializer_list>
#include <iterator>
#include <vector>
+
#include <boost/core/ignore_unused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
template<typename T, bool DoQuantize=true>
struct SelectiveQuantizer
//
#pragma once
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
#include "ClDepthwiseConvolutionWorkload.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include "ClWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
#pragma once
#include <armnn/ArmNN.hpp>
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
namespace armnn
{
#include <armnn/ArmNN.hpp>
+#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <algorithm>
#include <backendsCommon/WorkloadData.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
namespace armnn
{
#include "Debug.hpp"
#include "RefWorkloadUtils.hpp"
-#include <TypeUtils.hpp>
+#include <ResolveType.hpp>
#include <cstring>
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
#include "StringMapping.hpp"
-#include "TypeUtils.hpp"
-
+#include <ResolveType.hpp>
#include <vector>
namespace armnn
#include "Gather.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
namespace armnn
{
#include "Profiling.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
#include <vector>
#include "RefWorkloadUtils.hpp"
#include <Permute.hpp>
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
namespace armnn
{
#include "SpaceToBatchNd.hpp"
#include "RefWorkloadUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
namespace armnn
{
#include "StridedSlice.hpp"
#include "RefWorkloadUtils.hpp"
-#include "TypeUtils.hpp"
+#include <ResolveType.hpp>
namespace armnn
{