2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
#include <cmath>
#include <cstdint>
#include <limits>

#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
18 constexpr char const* GetStatusAsCString(Status status)
22 case armnn::Status::Success: return "Status::Success";
23 case armnn::Status::Failure: return "Status::Failure";
24 default: return "Unknown";
28 constexpr char const* GetActivationFunctionAsCString(ActivationFunction activation)
32 case ActivationFunction::Sigmoid: return "Sigmoid";
33 case ActivationFunction::TanH: return "TanH";
34 case ActivationFunction::Linear: return "Linear";
35 case ActivationFunction::ReLu: return "ReLu";
36 case ActivationFunction::BoundedReLu: return "BoundedReLu";
37 case ActivationFunction::SoftReLu: return "SoftReLu";
38 case ActivationFunction::LeakyReLu: return "LeakyReLu";
39 case ActivationFunction::Abs: return "Abs";
40 case ActivationFunction::Sqrt: return "Sqrt";
41 case ActivationFunction::Square: return "Square";
42 default: return "Unknown";
46 constexpr char const* GetPoolingAlgorithmAsCString(PoolingAlgorithm pooling)
50 case PoolingAlgorithm::Average: return "Average";
51 case PoolingAlgorithm::Max: return "Max";
52 case PoolingAlgorithm::L2: return "L2";
53 default: return "Unknown";
57 constexpr char const* GetOutputShapeRoundingAsCString(OutputShapeRounding rounding)
61 case OutputShapeRounding::Ceiling: return "Ceiling";
62 case OutputShapeRounding::Floor: return "Floor";
63 default: return "Unknown";
68 constexpr char const* GetPaddingMethodAsCString(PaddingMethod method)
72 case PaddingMethod::Exclude: return "Exclude";
73 case PaddingMethod::IgnoreValue: return "IgnoreValue";
74 default: return "Unknown";
78 constexpr unsigned int GetDataTypeSize(DataType dataType)
82 case DataType::Float16: return 2U;
83 case DataType::Float32:
84 case DataType::Signed32: return 4U;
85 case DataType::QuantisedAsymm8: return 1U;
86 case DataType::Boolean: return 1U;
/// Compile-time string equality check between a C string and a string
/// literal. All N characters of the literal (including its terminating
/// null) are compared, so the strings must match exactly.
template <std::size_t N>
constexpr bool StrEqual(const char* strA, const char (&strB)[N])
{
    bool matches = true;
    unsigned idx = 0;
    while (matches && idx < N)
    {
        matches = (strA[idx] == strB[idx]);
        ++idx;
    }
    return matches;
}
102 /// Deprecated function that will be removed together with
104 constexpr armnn::Compute ParseComputeDevice(const char* str)
106 if (armnn::StrEqual(str, "CpuAcc"))
108 return armnn::Compute::CpuAcc;
110 else if (armnn::StrEqual(str, "CpuRef"))
112 return armnn::Compute::CpuRef;
114 else if (armnn::StrEqual(str, "GpuAcc"))
116 return armnn::Compute::GpuAcc;
120 return armnn::Compute::Undefined;
124 constexpr const char* GetDataTypeName(DataType dataType)
128 case DataType::Float16: return "Float16";
129 case DataType::Float32: return "Float32";
130 case DataType::QuantisedAsymm8: return "Unsigned8";
131 case DataType::Signed32: return "Signed32";
138 constexpr const char* GetDataLayoutName(DataLayout dataLayout)
142 case DataLayout::NCHW: return "NCHW";
143 case DataLayout::NHWC: return "NHWC";
144 default: return "Unknown";
/// Trait that is true only for 2-byte floating point types
/// (i.e. half precision).
template<typename T>
struct IsHalfType
    : std::integral_constant<bool, std::is_floating_point<T>::value && sizeof(T) == 2>
{
};
/// Returns true if T is a quantized data type; every integral type
/// is treated as quantized.
template<typename T>
constexpr bool IsQuantizedType()
{
    return std::is_integral<T>::value;
}
160 inline std::ostream& operator<<(std::ostream& os, Status stat)
162 os << GetStatusAsCString(stat);
167 inline std::ostream & operator<<(std::ostream & os, const armnn::TensorShape & shape)
170 for (uint32_t i=0; i<shape.GetNumDimensions(); ++i)
182 /// Quantize a floating point data type into an 8-bit data type.
183 /// @param value - The value to quantize.
184 /// @param scale - The scale (must be non-zero).
185 /// @param offset - The offset.
186 /// @return - The quantized value calculated as round(value/scale)+offset.
188 template<typename QuantizedType>
189 inline QuantizedType Quantize(float value, float scale, int32_t offset)
191 // TODO : check we act sensibly for Inf, NaN and -Inf
193 static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
194 constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
195 constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
196 BOOST_ASSERT(scale != 0.f);
197 int quantized = boost::numeric_cast<int>(round(value / scale)) + offset;
198 QuantizedType quantizedBits = quantized <= min
202 : static_cast<QuantizedType>(quantized);
203 return quantizedBits;
206 /// Dequantize an 8-bit data type into a floating point data type.
207 /// @param value - The value to dequantize.
208 /// @param scale - The scale (must be non-zero).
209 /// @param offset - The offset.
210 /// @return - The dequantized value calculated as (value-offset)*scale.
212 template <typename QuantizedType>
213 inline float Dequantize(QuantizedType value, float scale, int32_t offset)
215 static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
216 BOOST_ASSERT(scale != 0.f);
217 float dequantized = boost::numeric_cast<float>(value - offset) * scale;
221 template <armnn::DataType DataType>
222 void VerifyTensorInfoDataType(const armnn::TensorInfo & info)
224 if (info.GetDataType() != DataType)
226 std::stringstream ss;
227 ss << "Unexpected datatype:" << armnn::GetDataTypeName(info.GetDataType())
228 << " for tensor:" << info.GetShape()
229 << ". The type expected to be: " << armnn::GetDataTypeName(DataType);
230 throw armnn::Exception(ss.str());