/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __UTILS_UTILS_H__
#define __UTILS_UTILS_H__

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Tensor.h"
#include "libnpy/npy.hpp"
#include "support/ToolchainSupport.h"

#ifdef ARM_COMPUTE_CL
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/runtime/CL/CLDistribution1D.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#endif /* ARM_COMPUTE_CL */

#ifdef ARM_COMPUTE_GC
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#endif /* ARM_COMPUTE_GC */

#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <random>
#include <string>
#include <tuple>
#include <vector>
/** Abstract Example class.
 *
 * All examples have to inherit from this class.
 */
class Example
{
public:
    /** Prepare any resources needed by the example. Called once, before do_run().
     *
     * @param[in] argc Number of command line arguments
     * @param[in] argv Command line arguments
     */
    virtual void do_setup(int argc, char **argv) {};
    /** Execute the example's workload. */
    virtual void do_run() {};
    /** Release any resources acquired in do_setup(). Called once, after do_run(). */
    virtual void do_teardown() {};

    /** Default destructor. */
    virtual ~Example() = default;
};
73 /** Run an example and handle the potential exceptions it throws
75 * @param[in] argc Number of command line arguments
76 * @param[in] argv Command line arguments
77 * @param[in] example Example to run
79 int run_example(int argc, char **argv, Example &example);
82 int run_example(int argc, char **argv)
85 return run_example(argc, argv, example);
88 /** Draw a RGB rectangular window for the detected object
90 * @param[in, out] tensor Input tensor where the rectangle will be drawn on. Format supported: RGB888
91 * @param[in] rect Geometry of the rectangular window
92 * @param[in] r Red colour to use
93 * @param[in] g Green colour to use
94 * @param[in] b Blue colour to use
96 void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b);
/** Parse the ppm header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first pixel stored in the ppm file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return The width, height and max value stored in the header of the PPM file
 */
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs);
/** Parse the npy header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first pixel stored in the npy file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return The shape (as a vector of dimensions), fortran-order flag and typestring read from the header of the NPY file
 */
std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs);
116 /** Obtain numpy type string from DataType.
118 * @param[in] data_type Data type.
120 * @return numpy type string.
122 inline std::string get_typestring(DataType data_type)
125 const unsigned int i = 1;
126 const char *c = reinterpret_cast<const char *>(&i);
127 std::string endianness;
130 endianness = std::string("<");
134 endianness = std::string(">");
136 const std::string no_endianness("|");
141 case DataType::QASYMM8:
142 return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
144 return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
146 return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
148 return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
150 return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
152 return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
154 return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
156 return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
158 return endianness + "f" + support::cpp11::to_string(sizeof(float));
160 return endianness + "f" + support::cpp11::to_string(sizeof(double));
161 case DataType::SIZET:
162 return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
164 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
168 /** Maps a tensor if needed
170 * @param[in] tensor Tensor to be mapped
171 * @param[in] blocking Specified if map is blocking or not
173 template <typename T>
174 inline void map(T &tensor, bool blocking)
176 ARM_COMPUTE_UNUSED(tensor);
177 ARM_COMPUTE_UNUSED(blocking);
/** Unmaps a tensor if needed
 *
 * Generic fallback: CPU tensors need no unmapping, so this is a no-op. Backend
 * overloads below (CLTensor/GCTensor) perform the real unmap.
 *
 * @param tensor  Tensor to be unmapped
 */
template <typename T>
inline void unmap(T &tensor)
{
    ARM_COMPUTE_UNUSED(tensor);
}
#ifdef ARM_COMPUTE_CL
/** Maps a tensor if needed
 *
 * @param[in] tensor   Tensor to be mapped
 * @param[in] blocking Specified if map is blocking or not
 */
inline void map(CLTensor &tensor, bool blocking)
{
    tensor.map(blocking);
}

/** Unmaps a tensor if needed
 *
 * @param tensor  Tensor to be unmapped
 */
inline void unmap(CLTensor &tensor)
{
    tensor.unmap();
}

/** Maps a distribution if needed
 *
 * @param[in] distribution Distribution to be mapped
 * @param[in] blocking     Specified if map is blocking or not
 */
inline void map(CLDistribution1D &distribution, bool blocking)
{
    distribution.map(blocking);
}

/** Unmaps a distribution if needed
 *
 * @param distribution Distribution to be unmapped
 */
inline void unmap(CLDistribution1D &distribution)
{
    distribution.unmap();
}
#endif /* ARM_COMPUTE_CL */
#ifdef ARM_COMPUTE_GC
/** Maps a tensor if needed
 *
 * @param[in] tensor   Tensor to be mapped
 * @param[in] blocking Specified if map is blocking or not
 */
inline void map(GCTensor &tensor, bool blocking)
{
    tensor.map(blocking);
}

/** Unmaps a tensor if needed
 *
 * @param tensor  Tensor to be unmapped
 */
inline void unmap(GCTensor &tensor)
{
    tensor.unmap();
}
#endif /* ARM_COMPUTE_GC */
251 /** Class to load the content of a PPM file into an Image
257 : _fs(), _width(0), _height(0)
260 /** Open a PPM file and reads its metadata (Width, height)
262 * @param[in] ppm_filename File to open
264 void open(const std::string &ppm_filename)
266 ARM_COMPUTE_ERROR_ON(is_open());
269 _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
270 _fs.open(ppm_filename, std::ios::in | std::ios::binary);
272 unsigned int max_val = 0;
273 std::tie(_width, _height, max_val) = parse_ppm_header(_fs);
275 ARM_COMPUTE_ERROR_ON_MSG(max_val >= 256, "2 bytes per colour channel not supported in file %s", ppm_filename.c_str());
277 catch(std::runtime_error &e)
279 ARM_COMPUTE_ERROR("Accessing %s: %s", ppm_filename.c_str(), e.what());
282 /** Return true if a PPM file is currently open
286 return _fs.is_open();
289 /** Initialise an image's metadata with the dimensions of the PPM file currently open
291 * @param[out] image Image to initialise
292 * @param[in] format Format to use for the image (Must be RGB888 or U8)
294 template <typename T>
295 void init_image(T &image, arm_compute::Format format)
297 ARM_COMPUTE_ERROR_ON(!is_open());
298 ARM_COMPUTE_ERROR_ON(format != arm_compute::Format::RGB888 && format != arm_compute::Format::U8);
300 // Use the size of the input PPM image
301 arm_compute::TensorInfo image_info(_width, _height, format);
302 image.allocator()->init(image_info);
305 /** Fill an image with the content of the currently open PPM file.
307 * @note If the image is a CLImage, the function maps and unmaps the image
309 * @param[in,out] image Image to fill (Must be allocated, and of matching dimensions with the opened PPM).
311 template <typename T>
312 void fill_image(T &image)
314 ARM_COMPUTE_ERROR_ON(!is_open());
315 ARM_COMPUTE_ERROR_ON(image.info()->dimension(0) != _width || image.info()->dimension(1) != _height);
316 ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&image, arm_compute::Format::U8, arm_compute::Format::RGB888);
319 // Map buffer if creating a CLTensor/GCTensor
322 // Check if the file is large enough to fill the image
323 const size_t current_position = _fs.tellg();
324 _fs.seekg(0, std::ios_base::end);
325 const size_t end_position = _fs.tellg();
326 _fs.seekg(current_position, std::ios_base::beg);
328 ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < image.info()->tensor_shape().total_size() * image.info()->element_size(),
329 "Not enough data in file");
330 ARM_COMPUTE_UNUSED(end_position);
332 switch(image.info()->format())
334 case arm_compute::Format::U8:
336 // We need to convert the data from RGB to grayscale:
337 // Iterate through every pixel of the image
338 arm_compute::Window window;
339 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
340 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
342 arm_compute::Iterator out(&image, window);
344 unsigned char red = 0;
345 unsigned char green = 0;
346 unsigned char blue = 0;
348 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
354 *out.ptr() = 0.2126f * red + 0.7152f * green + 0.0722f * blue;
360 case arm_compute::Format::RGB888:
362 // There is no format conversion needed: we can simply copy the content of the input file to the image one row at the time.
363 // Create a vertical window to iterate through the image's rows:
364 arm_compute::Window window;
365 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
367 arm_compute::Iterator out(&image, window);
369 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
371 // Copy one row from the input file to the current row of the image:
372 _fs.read(reinterpret_cast<std::fstream::char_type *>(out.ptr()), _width * image.info()->element_size());
379 ARM_COMPUTE_ERROR("Unsupported format");
382 // Unmap buffer if creating a CLTensor/GCTensor
385 catch(const std::ifstream::failure &e)
387 ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
391 /** Fill a tensor with 3 planes (one for each channel) with the content of the currently open PPM file.
393 * @note If the image is a CLImage, the function maps and unmaps the image
395 * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened PPM). Data types supported: U8/F32
396 * @param[in] bgr (Optional) Fill the first plane with blue channel (default = false)
398 template <typename T>
399 void fill_planar_tensor(T &tensor, bool bgr = false)
401 ARM_COMPUTE_ERROR_ON(!is_open());
402 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32);
403 ARM_COMPUTE_ERROR_ON(tensor.info()->dimension(0) != _width || tensor.info()->dimension(1) != _height || tensor.info()->dimension(2) != 3);
407 // Map buffer if creating a CLTensor
410 // Check if the file is large enough to fill the image
411 const size_t current_position = _fs.tellg();
412 _fs.seekg(0, std::ios_base::end);
413 const size_t end_position = _fs.tellg();
414 _fs.seekg(current_position, std::ios_base::beg);
416 ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size(),
417 "Not enough data in file");
418 ARM_COMPUTE_UNUSED(end_position);
420 // Iterate through every pixel of the image
421 arm_compute::Window window;
422 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
423 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
424 window.set(arm_compute::Window::DimZ, arm_compute::Window::Dimension(0, 1, 1));
426 arm_compute::Iterator out(&tensor, window);
428 unsigned char red = 0;
429 unsigned char green = 0;
430 unsigned char blue = 0;
432 size_t stride_z = tensor.info()->strides_in_bytes()[2];
434 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
440 switch(tensor.info()->data_type())
442 case arm_compute::DataType::U8:
444 *(out.ptr() + 0 * stride_z) = bgr ? blue : red;
445 *(out.ptr() + 1 * stride_z) = green;
446 *(out.ptr() + 2 * stride_z) = bgr ? red : blue;
449 case arm_compute::DataType::F32:
451 *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red);
452 *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green);
453 *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue);
458 ARM_COMPUTE_ERROR("Unsupported data type");
464 // Unmap buffer if creating a CLTensor
467 catch(const std::ifstream::failure &e)
469 ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
473 /** Return the width of the currently open PPM file.
475 unsigned int width() const
480 /** Return the height of the currently open PPM file.
482 unsigned int height() const
489 unsigned int _width, _height;
496 : _fs(), _shape(), _fortran_order(false), _typestring()
500 /** Open a NPY file and reads its metadata
502 * @param[in] npy_filename File to open
504 void open(const std::string &npy_filename)
506 ARM_COMPUTE_ERROR_ON(is_open());
509 _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
510 _fs.open(npy_filename, std::ios::in | std::ios::binary);
512 std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs);
514 catch(const std::ifstream::failure &e)
516 ARM_COMPUTE_ERROR("Accessing %s: %s", npy_filename.c_str(), e.what());
519 /** Return true if a NPY file is currently open */
522 return _fs.is_open();
525 /** Return true if a NPY file is in fortran order */
528 return _fortran_order;
531 /** Initialise the tensor's metadata with the dimensions of the NPY file currently open
533 * @param[out] tensor Tensor to initialise
534 * @param[in] dt Data type to use for the tensor
536 template <typename T>
537 void init_tensor(T &tensor, arm_compute::DataType dt)
539 ARM_COMPUTE_ERROR_ON(!is_open());
540 ARM_COMPUTE_ERROR_ON(dt != arm_compute::DataType::F32);
542 // Use the size of the input NPY tensor
544 shape.set_num_dimensions(_shape.size());
545 for(size_t i = 0; i < _shape.size(); ++i)
547 shape.set(i, _shape.at(i));
550 arm_compute::TensorInfo tensor_info(shape, 1, dt);
551 tensor.allocator()->init(tensor_info);
554 /** Fill a tensor with the content of the currently open NPY file.
556 * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
558 * @param[in,out] tensor Tensor to fill (Must be allocated, and of matching dimensions with the opened NPY).
560 template <typename T>
561 void fill_tensor(T &tensor)
563 ARM_COMPUTE_ERROR_ON(!is_open());
564 ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
567 // Map buffer if creating a CLTensor
570 // Check if the file is large enough to fill the tensor
571 const size_t current_position = _fs.tellg();
572 _fs.seekg(0, std::ios_base::end);
573 const size_t end_position = _fs.tellg();
574 _fs.seekg(current_position, std::ios_base::beg);
576 ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(),
577 "Not enough data in file");
578 ARM_COMPUTE_UNUSED(end_position);
580 // Check if the typestring matches the given one
581 std::string expect_typestr = get_typestring(tensor.info()->data_type());
582 ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch");
584 // Validate tensor shape
585 ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), "Tensor ranks mismatch");
588 for(size_t i = 0; i < _shape.size(); ++i)
590 ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[i], "Tensor dimensions mismatch");
595 for(size_t i = 0; i < _shape.size(); ++i)
597 ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[_shape.size() - i - 1], "Tensor dimensions mismatch");
601 switch(tensor.info()->data_type())
603 case arm_compute::DataType::F32:
606 if(tensor.info()->padding().empty())
608 // If tensor has no padding read directly from stream.
609 _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size());
613 // If tensor has padding accessing tensor elements through execution window.
615 window.use_tensor_dimensions(tensor.info()->tensor_shape());
617 execute_window_loop(window, [&](const Coordinates & id)
619 _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(id)), tensor.info()->element_size());
626 ARM_COMPUTE_ERROR("Unsupported data type");
629 // Unmap buffer if creating a CLTensor
632 catch(const std::ifstream::failure &e)
634 ARM_COMPUTE_ERROR("Loading NPY file: %s", e.what());
640 std::vector<unsigned long> _shape;
642 std::string _typestring;
645 /** Template helper function to save a tensor image to a PPM file.
647 * @note Only U8 and RGB888 formats supported.
648 * @note Only works with 2D tensors.
649 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
651 * @param[in] tensor The tensor to save as PPM file
652 * @param[in] ppm_filename Filename of the file to create.
654 template <typename T>
655 void save_to_ppm(T &tensor, const std::string &ppm_filename)
657 ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::Format::RGB888, arm_compute::Format::U8);
658 ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
664 fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
665 fs.open(ppm_filename, std::ios::out | std::ios::binary);
667 const unsigned int width = tensor.info()->tensor_shape()[0];
668 const unsigned int height = tensor.info()->tensor_shape()[1];
671 << width << " " << height << " 255\n";
673 // Map buffer if creating a CLTensor/GCTensor
676 switch(tensor.info()->format())
678 case arm_compute::Format::U8:
680 arm_compute::Window window;
681 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
682 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
684 arm_compute::Iterator in(&tensor, window);
686 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
688 const unsigned char value = *in.ptr();
690 fs << value << value << value;
696 case arm_compute::Format::RGB888:
698 arm_compute::Window window;
699 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, width));
700 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
702 arm_compute::Iterator in(&tensor, window);
704 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
706 fs.write(reinterpret_cast<std::fstream::char_type *>(in.ptr()), width * tensor.info()->element_size());
713 ARM_COMPUTE_ERROR("Unsupported format");
716 // Unmap buffer if creating a CLTensor/GCTensor
719 catch(const std::ofstream::failure &e)
721 ARM_COMPUTE_ERROR("Writing %s: (%s)", ppm_filename.c_str(), e.what());
725 /** Template helper function to save a tensor image to a NPY file.
727 * @note Only F32 data type supported.
728 * @note Only works with 2D tensors.
729 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
731 * @param[in] tensor The tensor to save as NPY file
732 * @param[in] npy_filename Filename of the file to create.
733 * @param[in] fortran_order If true, save matrix in fortran order.
735 template <typename T>
736 void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
738 ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
739 ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
745 fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
746 fs.open(npy_filename, std::ios::out | std::ios::binary);
748 const unsigned int width = tensor.info()->tensor_shape()[0];
749 const unsigned int height = tensor.info()->tensor_shape()[1];
750 std::vector<npy::ndarray_len_t> shape(2);
754 shape[0] = height, shape[1] = width;
758 shape[0] = width, shape[1] = height;
761 // Map buffer if creating a CLTensor
764 switch(tensor.info()->data_type())
766 case arm_compute::DataType::F32:
768 std::vector<float> tmp; /* Used only to get the typestring */
769 npy::Typestring typestring_o{ tmp };
770 std::string typestring = typestring_o.str();
772 std::ofstream stream(npy_filename, std::ofstream::binary);
773 npy::write_header(stream, typestring, fortran_order, shape);
775 arm_compute::Window window;
776 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
777 window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
779 arm_compute::Iterator in(&tensor, window);
781 arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
783 stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
790 ARM_COMPUTE_ERROR("Unsupported format");
793 // Unmap buffer if creating a CLTensor
796 catch(const std::ofstream::failure &e)
798 ARM_COMPUTE_ERROR("Writing %s: (%s)", npy_filename.c_str(), e.what());
802 /** Load the tensor with pre-trained data from a binary file
804 * @param[in] tensor The tensor to be filled. Data type supported: F32.
805 * @param[in] filename Filename of the binary file to load from.
807 template <typename T>
808 void load_trained_data(T &tensor, const std::string &filename)
810 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
816 fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
818 fs.open(filename, std::ios::in | std::ios::binary);
822 throw std::runtime_error("Could not load binary data: " + filename);
825 // Map buffer if creating a CLTensor/GCTensor
830 window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, 1, 1));
832 for(unsigned int d = 1; d < tensor.info()->num_dimensions(); ++d)
834 window.set(d, Window::Dimension(0, tensor.info()->tensor_shape()[d], 1));
837 arm_compute::Iterator in(&tensor, window);
839 execute_window_loop(window, [&](const Coordinates & id)
841 fs.read(reinterpret_cast<std::fstream::char_type *>(in.ptr()), tensor.info()->tensor_shape()[0] * tensor.info()->element_size());
845 // Unmap buffer if creating a CLTensor/GCTensor
848 catch(const std::ofstream::failure &e)
850 ARM_COMPUTE_ERROR("Writing %s: (%s)", filename.c_str(), e.what());
854 template <typename T>
855 void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
857 std::random_device rd;
858 std::mt19937 gen(rd());
860 TensorShape shape(tensor.info()->dimension(0), tensor.info()->dimension(1));
863 window.set(Window::DimX, Window::Dimension(0, shape.x(), 1));
864 window.set(Window::DimY, Window::Dimension(0, shape.y(), 1));
868 Iterator it(&tensor, window);
870 switch(tensor.info()->data_type())
872 case arm_compute::DataType::F32:
874 std::uniform_real_distribution<float> dist(lower_bound, upper_bound);
876 execute_window_loop(window, [&](const Coordinates & id)
878 *reinterpret_cast<float *>(it.ptr()) = dist(gen);
886 ARM_COMPUTE_ERROR("Unsupported format");
893 template <typename T>
894 void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt)
896 dst.allocator()->init(TensorInfo(TensorShape(src1.info()->dimension(0), src0.info()->dimension(1)), 1, dt));
/** This function returns the amount of memory free reading from /proc/meminfo
 *
 * @return The free memory in kB
 */
uint64_t get_mem_free_from_meminfo();
904 } // namespace arm_compute
905 #endif /* __UTILS_UTILS_H__*/