* Floating point casts now use armnn::numeric_cast.
* Also removed the remaining Boost cast header includes (boost/cast.hpp and boost/numeric/conversion/cast.hpp).
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I2d37847d67f164fc0a0ae17f34d49ff3d2210c30
#include "QuantizerVisitor.hpp"
#include "StaticRangeVisitor.hpp"
+#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
for (size_t i = 0; i < backing.size(); ++i)
{
float fp32Value = static_cast<const float*>(biases.value().GetMemoryArea())[i];
- backing[i] = boost::numeric_cast<int32_t>(fp32Value * ( 1 / scale ));
+ backing[i] = armnn::numeric_cast<int32_t>(fp32Value * ( 1 / scale ));
}
return ConstTensor(qInfo, backing);
//
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
-
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace
{
static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
ARMNN_ASSERT(scale != 0.f);
ARMNN_ASSERT(!IsNan(value));
- return (boost::numeric_cast<float>(value - offset)) * scale;
+ return (armnn::numeric_cast<float>(value - offset)) * scale;
}
/// Explicit specialization of Quantize for int8_t
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fmt/format.h>
// Caffe
inputInfo.GetShape()[1],
static_cast<unsigned int>(ceil(
static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
- boost::numeric_cast<float>(stride_h))) + 1,
+ armnn::numeric_cast<float>(stride_h))) + 1,
static_cast<unsigned int>(ceil(
static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
- boost::numeric_cast<float>(stride_w))) + 1 },
+ armnn::numeric_cast<float>(stride_w))) + 1 },
DataType::Float32);
GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0));
if (param.has_alpha())
{
normalizationDescriptor.m_Alpha = param.alpha();
- normalizationDescriptor.m_Alpha /= boost::numeric_cast<float>(param.local_size());
+ normalizationDescriptor.m_Alpha /= armnn::numeric_cast<float>(param.local_size());
}
else
{
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/cast.hpp>
-
#include <cstring>
namespace armnn
#include <backendsCommon/MemSyncWorkload.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/cast.hpp>
-
#include <cstring>
namespace armnn
#include <armnn/Logging.hpp>
#include <armnn/profiling/ISendTimelinePacket.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>
#include <vector>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <boost/cast.hpp>
#include <boost/test/unit_test.hpp>
#include <utility>
#include <Optimizer.hpp>
#include <SubgraphViewSelector.hpp>
-#include <boost/cast.hpp>
-
#include <algorithm>
namespace
#include <test/TensorHelpers.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <string>
//
#include <test/TensorHelpers.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace
{
#include <cl/ClTensorHandle.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <cl/ClTensorHandle.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <cl/ClTensorHandle.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <cl/ClTensorHandle.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <cl/ClTensorHandle.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <arm_compute/runtime/Allocator.h>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/Workload.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <neon/workloads/NeonWorkloadUtils.hpp>
-#include <boost/cast.hpp>
-
namespace armnn
{
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <limits>
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <algorithm>
#include <iostream>
std::cout << "], ";
std::cout << "\"min\": "
- << boost::numeric_cast<float>(*std::min_element(inputData, inputData + numElements)) << ", ";
+ << static_cast<float>(*std::min_element(inputData, inputData + numElements)) << ", ";
std::cout << "\"max\": "
- << boost::numeric_cast<float>(*std::max_element(inputData, inputData + numElements)) << ", ";
+ << static_cast<float>(*std::max_element(inputData, inputData + numElements)) << ", ";
std::cout << "\"data\": ";
}
}
- std::cout << boost::numeric_cast<float>(inputData[i]);
+ std::cout << static_cast<float>(inputData[i]);
for (unsigned int j = 0; j < numDims; j++)
{
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <algorithm>
#include <numeric>
{
unsigned int boxCornorIndex = selectedBoxes[outputIndices[i]] * 4;
detectionScores[i] = selectedScores[outputIndices[i]];
- detectionClasses[i] = boost::numeric_cast<float>(selectedClasses[outputIndices[i]]);
+ detectionClasses[i] = armnn::numeric_cast<float>(selectedClasses[outputIndices[i]]);
detectionBoxes[boxIndex] = boxCorners[boxCornorIndex];
detectionBoxes[boxIndex + 1] = boxCorners[boxCornorIndex + 1];
detectionBoxes[boxIndex + 2] = boxCorners[boxCornorIndex + 2];
detectionBoxes[boxIndex + 3] = 0.0f;
}
}
- numDetections[0] = boost::numeric_cast<float>(numSelected);
+ numDetections[0] = armnn::numeric_cast<float>(numSelected);
}
void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <cstddef>
#include <functional>
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- ARMNN_ASSERT(boost::numeric_cast<float>(current) <
- (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
+ ARMNN_ASSERT(armnn::numeric_cast<float>(current) <
+ (std::numeric_limits<float>::max() / armnn::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
if (numElementsInAxis > 0) {
for (unsigned int idx = 0; idx < numOutputs; ++idx)
{
output[idx];
- output.Set(tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis));
+ output.Set(tempSum[idx] / armnn::numeric_cast<float>(numElementsInAxis));
}
}
}
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <limits>
#include <algorithm>
#include <functional>
wend = std::min(wend, widthInput + padRight);
float result = defaultInitializer;
- float poolAreaSize = boost::numeric_cast<float>(height * (wend - wstart));
+ float poolAreaSize = armnn::numeric_cast<float>(height * (wend - wstart));
// Special case: when the pooling kernel is over a padding region and the padding
// size is larger or equal to the kernel and the kernel only covers
{
// When we exclude the padding, it means we calculate with a smaller
// kernel size, so I changed the divisor here.
- poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
+ poolAreaSize = armnn::numeric_cast<float>((hend - hstart) * (wend - wstart));
}
for (auto yInput = hstart; yInput < hend; yInput++)
#include "Profiling.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace armnn
{
void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
float scale = (max - min) / 255.f;
- int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min));
+ int32_t offset = armnn::numeric_cast<int32_t>((-min * 255.f) / (max - min));
for (uint32_t i = 0; i < numElements; i++)
{
#include "TensorBufferArrayView.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cmath>
#include <algorithm>
inline double EuclideanDistance(float Xa, float Ya, const unsigned int Xb, const unsigned int Yb)
{
- return std::sqrt(pow(Xa - boost::numeric_cast<float>(Xb), 2) + pow(Ya - boost::numeric_cast<float>(Yb), 2));
+ return std::sqrt(pow(Xa - armnn::numeric_cast<float>(Xb), 2) + pow(Ya - armnn::numeric_cast<float>(Yb), 2));
}
inline float CalculateResizeScale(const unsigned int& InputSize,
const bool& AlignCorners)
{
return (AlignCorners && OutputSize > 1)
- ? boost::numeric_cast<float>(InputSize - 1) / boost::numeric_cast<float>(OutputSize - 1)
- : boost::numeric_cast<float>(InputSize) / boost::numeric_cast<float>(OutputSize);
+ ? armnn::numeric_cast<float>(InputSize - 1) / armnn::numeric_cast<float>(OutputSize - 1)
+ : armnn::numeric_cast<float>(InputSize) / armnn::numeric_cast<float>(OutputSize);
}
inline float PixelScaler(const unsigned int& Pixel,
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{
#include <Filesystem.hpp>
#include "../src/armnn/Profiling.hpp"
-#include <boost/numeric/conversion/cast.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <boost/program_options.hpp>
#include <fmt/format.h>
template <typename TDatabase, typename InferenceModel>
bool ClassifierTestCaseProvider<TDatabase, InferenceModel>::OnInferenceTestFinished()
{
- const double accuracy = boost::numeric_cast<double>(m_NumCorrectInferences) /
- boost::numeric_cast<double>(m_NumInferences);
+ const double accuracy = armnn::numeric_cast<double>(m_NumCorrectInferences) /
+ armnn::numeric_cast<double>(m_NumInferences);
ARMNN_LOG(info) << std::fixed << std::setprecision(3) << "Overall accuracy: " << accuracy;
// If a validation file was requested as output, the predictions are saved to it.
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <fmt/format.h>
#include <array>
// How much to scale pixel coordinates in the output image to get the corresponding pixel coordinates
// in the input image.
- const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
- const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
+ const float scaleY = armnn::numeric_cast<float>(inputHeight) / armnn::numeric_cast<float>(outputHeight);
+ const float scaleX = armnn::numeric_cast<float>(inputWidth) / armnn::numeric_cast<float>(outputWidth);
uint8_t rgb_x0y0[3];
uint8_t rgb_x1y0[3];
for (unsigned int y = 0; y < outputHeight; ++y)
{
// Corresponding real-valued height coordinate in input image.
- const float iy = boost::numeric_cast<float>(y) * scaleY;
+ const float iy = armnn::numeric_cast<float>(y) * scaleY;
// Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation).
const float fiy = floorf(iy);
- const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy);
+ const unsigned int y0 = armnn::numeric_cast<unsigned int>(fiy);
// Interpolation weight (range [0,1])
const float yw = iy - fiy;
for (unsigned int x = 0; x < outputWidth; ++x)
{
// Real-valued and discrete width coordinates in input image.
- const float ix = boost::numeric_cast<float>(x) * scaleX;
+ const float ix = armnn::numeric_cast<float>(x) * scaleX;
const float fix = floorf(ix);
- const unsigned int x0 = boost::numeric_cast<unsigned int>(fix);
+ const unsigned int x0 = armnn::numeric_cast<unsigned int>(fix);
// Interpolation weight (range [0,1]).
const float xw = ix - fix;
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
#include <vector>
const std::vector<float>& output4 = mapbox::util::get<std::vector<float>>(this->GetOutputs()[3]);
ARMNN_ASSERT(output4.size() == k_OutputSize4);
- const size_t numDetections = boost::numeric_cast<size_t>(output4[0]);
+ const size_t numDetections = armnn::numeric_cast<size_t>(output4[0]);
// Check if number of valid detections matches expectations
const size_t expectedNumDetections = m_DetectedObjects.size();