53 return std::make_pair(permuted_shape, perm);
64 const float value = *
reinterpret_cast<float *
>(tensor.
ptr_to_element(
id));
65 float res = value / 255.f;
66 res = (res - 0.5f) * 2.f;
72 : _mean(mean), _bgr(bgr)
87 const float value = *
reinterpret_cast<float *
>(tensor.
ptr_to_element(
id)) - _mean[
id.z()];
93 : _name(std::move(name)), _iterator(0), _maximum(maximum)
100 ss << _name << _iterator <<
".ppm";
109 return _iterator < _maximum;
113 : _iterator(0), _maximum(maximum)
120 bool ret = _maximum == 0 || _iterator < _maximum;
121 if(_iterator == _maximum)
133 : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
144 template <
typename T>
145 void NumPyAccessor::access_numpy_tensor(
ITensor &tensor)
148 int num_mismatches = utils::compare_tensor<T>(tensor, _npy_tensor);
149 float percentage_mismatches =
static_cast<float>(num_mismatches) / num_elements;
151 _output_stream <<
"Results: " << 100.f - (percentage_mismatches * 100) <<
" % matches with the provided output[" << _filename <<
"]." << std::endl;
162 access_numpy_tensor<float>(tensor);
172 : _ppm_path(std::move(ppm_path)), _bgr(bgr), _preprocessor(std::move(preprocessor))
191 "Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].", ppm.
width(), ppm.
height(), permuted_shape.
x(), permuted_shape.
y());
199 _preprocessor->preprocess(tensor);
206 : _labels(), _output_stream(output_stream), _top_n(top_n)
214 ifs.exceptions(std::ifstream::badbit);
215 ifs.open(labels_path, std::ios::in | std::ios::binary);
217 for(std::string line; !std::getline(ifs, line).fail();)
219 _labels.emplace_back(line);
222 catch(
const std::ifstream::failure &e)
228 template <
typename T>
229 void TopNPredictionsAccessor::access_predictions_tensor(
ITensor &tensor)
232 std::vector<T> classes_prob;
233 std::vector<size_t> index;
238 classes_prob.resize(num_classes);
239 index.resize(num_classes);
241 std::copy(output_net, output_net + num_classes, classes_prob.begin());
244 std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
245 std::sort(std::begin(index), std::end(index),
246 [&](
size_t a,
size_t b)
248 return classes_prob[
a] > classes_prob[
b];
251 _output_stream <<
"---------- Top " << _top_n <<
" predictions ----------" << std::endl
253 for(
size_t i = 0; i < _top_n; ++i)
255 _output_stream << std::fixed << std::setprecision(4)
256 << +classes_prob[index.at(i)]
257 <<
" - [id = " << index.at(i) <<
"]" 258 <<
", " << _labels[index.at(i)] << std::endl;
270 access_predictions_tensor<uint8_t>(tensor);
273 access_predictions_tensor<float>(tensor);
283 : _lower(lower), _upper(upper), _seed(seed)
287 template <
typename T,
typename D>
288 void RandomAccessor::fill(
ITensor &tensor, D &&distribution)
290 std::mt19937 gen(_seed);
296 const T value = distribution(gen);
297 *
reinterpret_cast<T *
>(tensor.
buffer() +
offset) = value;
308 const T value = distribution(gen);
320 std::uniform_int_distribution<uint8_t> distribution_u8(_lower.
get<uint8_t>(), _upper.
get<uint8_t>());
321 fill<uint8_t>(tensor, distribution_u8);
327 std::uniform_int_distribution<int8_t> distribution_s8(_lower.
get<int8_t>(), _upper.
get<int8_t>());
328 fill<int8_t>(tensor, distribution_s8);
333 std::uniform_int_distribution<uint16_t> distribution_u16(_lower.
get<uint16_t>(), _upper.
get<uint16_t>());
334 fill<uint16_t>(tensor, distribution_u16);
340 std::uniform_int_distribution<int16_t> distribution_s16(_lower.
get<int16_t>(), _upper.
get<int16_t>());
341 fill<int16_t>(tensor, distribution_s16);
346 std::uniform_int_distribution<uint32_t> distribution_u32(_lower.
get<uint32_t>(), _upper.
get<uint32_t>());
347 fill<uint32_t>(tensor, distribution_u32);
352 std::uniform_int_distribution<int32_t> distribution_s32(_lower.
get<int32_t>(), _upper.
get<int32_t>());
353 fill<int32_t>(tensor, distribution_s32);
358 std::uniform_int_distribution<uint64_t> distribution_u64(_lower.
get<uint64_t>(), _upper.
get<uint64_t>());
359 fill<uint64_t>(tensor, distribution_u64);
364 std::uniform_int_distribution<int64_t> distribution_s64(_lower.
get<int64_t>(), _upper.
get<int64_t>());
365 fill<int64_t>(tensor, distribution_s64);
370 std::uniform_real_distribution<float> distribution_f16(_lower.
get<
float>(), _upper.
get<
float>());
371 fill<float>(tensor, distribution_f16);
376 std::uniform_real_distribution<float> distribution_f32(_lower.
get<
float>(), _upper.
get<
float>());
377 fill<float>(tensor, distribution_f32);
382 std::uniform_real_distribution<double> distribution_f64(_lower.
get<
double>(), _upper.
get<
double>());
383 fill<double>(tensor, distribution_f64);
393 : _filename(std::move(filename)), _file_layout(file_layout)
400 std::vector<unsigned long>
shape;
403 std::ifstream stream(_filename, std::ios::in | std::ios::binary);
405 std::string header = npy::read_header(stream);
408 bool fortran_order =
false;
410 npy::parse_header(header, typestr, fortran_order, shape);
419 std::reverse(shape.begin(), shape.end());
425 for(
int i = static_cast<int>(shape.size()) - 1; i > 0; --i)
438 bool are_layouts_different = (_file_layout != tensor.
info()->
data_layout());
446 if(are_layouts_different)
448 std::tie(permuted_shape, perm) = compute_permutation_paramaters(tensor_shape, tensor.
info()->
data_layout());
452 for(
size_t i = 0; i < shape.size(); ++i)
unsigned int height() const
Return the height of the currently open PPM file.
#define ARM_COMPUTE_ERROR(...)
Print the given message, then throw a std::runtime_error.
void save_to_ppm(T &tensor, const std::string &ppm_filename)
Template helper function to save a tensor image to a PPM file.
constexpr bool empty() const
Check if the entire border is zero.
Class describing the value of a pixel for any image format.
quantized, symmetric fixed-point 16-bit number
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
quantized, symmetric fixed-point 8-bit number
virtual size_t dimension(size_t index) const =0
Return the size of the requested dimension.
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
1 channel, 1 U8 per channel
virtual DataType data_type() const =0
Data type used for each element of the tensor.
1 channel, 1 F32 per channel
Strides PermutationVector
Permutation vector.
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
#define ARM_COMPUTE_ERROR_ON(cond)
If the condition is true then an error message is printed and an exception thrown.
void swap(SimpleTensor< U > &tensor1, SimpleTensor< U > &tensor2)
1 channel, 1 U16 per channel
void preprocess(ITensor &tensor) override
Preprocess the given tensor.
RandomAccessor(PixelValue lower, PixelValue upper, const std::random_device::result_type seed=0)
Constructor.
SimpleTensor< T > copy(const SimpleTensor< T > &src, const TensorShape &output_shape)
__global uchar * offset(const Image *img, int x, int y)
Get the pointer position of a Image.
Interface for NEON tensor.
void use_tensor_dimensions(const TensorShape &shape, size_t first_dimension=Window::DimX)
Use the tensor's dimensions to fill the window dimensions.
src info() -> set_format(Format::S16)
1 channel, 1 F16 per channel
TensorAllocator * allocator()
Return a pointer to the tensor's allocator.
T x() const
Alias to access the size of the first dimension.
void permute(Dimensions< T > &dimensions, const PermutationVector &perm)
Permutes given Dimensions according to a permutation vector.
PPMAccessor(std::string ppm_path, bool bgr=true, std::unique_ptr< IPreprocessor > preprocessor=nullptr)
Constructor.
1 channel, 1 S32 per channel
Basic implementation of the sub-tensor interface.
std::string get_typestring(DataType data_type)
Obtain numpy type string from DataType.
void get(uint8_t &v) const
Interpret the pixel value as a U8.
void open(const std::string &ppm_filename)
Open a PPM file and reads its metadata (Width, height)
Numpy Binary loader class.
#define ARM_COMPUTE_UNUSED(...)
To avoid unused variables warnings.
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
uint8_t * ptr_to_element(const Coordinates &id) const
Return a pointer to the element at the passed coordinates.
1 channel, 1 U32 per channel
virtual const TensorShape & tensor_shape() const =0
Size for each dimension of the tensor.
void execute_window_loop(const Window &w, L &&lambda_function, Ts &&...iterators)
Iterate through the passed window, automatically adjusting the iterators and calling the lambda_function for each element.
CaffePreproccessor(std::array< float, 3 > mean=std::array< float, 3 >{{0, 0, 0}}, bool bgr=true)
Default Constructor.
quantized, asymmetric fixed-point 8-bit number
virtual uint8_t * buffer() const =0
Interface to be implemented by the child class to return a pointer to CPU memory.
void allocate() override
Allocate size specified by TensorInfo of CPU memory.
virtual ITensorInfo * info() const =0
Interface to be implemented by the child class to return the tensor's metadata.
virtual size_t element_size() const =0
Element size in bytes calculated as data_size() * num_channels()
virtual PaddingSize padding() const =0
Padding of tensor.
1 channel, 1 S16 per channel
TopNPredictionsAccessor(const std::string &labels_path, size_t top_n=5, std::ostream &output_stream=std::cout)
Constructor.
Num samples, channels, height, width.
#define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)
NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream=std::cout)
Constructor.
Strides of an item in bytes.
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
virtual size_t offset_first_element_in_bytes() const =0
The offset from the beginning of the memory allocation to the first element of the tensor...
void preprocess(ITensor &tensor) override
Preprocess the given tensor.
virtual size_t total_size() const =0
Returns the total size of the tensor in bytes.
T y() const
Alias to access the size of the second dimension.
unsigned int num_dimensions() const
Returns the effective dimensionality of the tensor.
void fill_planar_tensor(T &tensor, bool bgr=false)
Fill a tensor with 3 planes (one for each channel) with the content of the currently open PPM file...
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
Num samples, height, width, channels.
void init(const TensorAllocator &allocator, const Coordinates &coords, TensorInfo sub_info)
Shares the same backing memory with another tensor allocator, while the tensor info might be different.
Store the tensor's metadata.
DummyAccessor(unsigned int maximum=1)
Constructor.
PPMWriter(std::string name, unsigned int maximum=1)
Constructor.
bool access_tensor(ITensor &tensor) override
Interface to be implemented to access a given tensor.
64-bit floating-point number
unsigned int width() const
Return the width of the currently open PPM file.
DataType
Available data types.
NumPyBinLoader(std::string filename, DataLayout file_layout=DataLayout::NCHW)
Default Constructor.
DataLayout
Supported tensor data layouts.
Describe a multidimensional execution window.
ITensorInfo * info() const override
Interface to be implemented by the child class to return the tensor's metadata.
Class to load the content of a PPM file into an Image.
virtual DataLayout data_layout() const =0
Get the data layout of the tensor.
#define ARM_COMPUTE_ERROR_ON_MSG(cond,...)