/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifndef __UTILS_UTILS_H__
25 #define __UTILS_UTILS_H__
27 #include "arm_compute/core/Helpers.h"
28 #include "arm_compute/core/ITensor.h"
29 #include "arm_compute/core/Types.h"
30 #include "arm_compute/core/Validate.h"
31 #include "arm_compute/core/Window.h"
32 #include "arm_compute/runtime/Tensor.h"
33 #include "libnpy/npy.hpp"
34 #include "support/ToolchainSupport.h"
37 #include "arm_compute/core/CL/OpenCL.h"
38 #include "arm_compute/runtime/CL/CLDistribution1D.h"
39 #include "arm_compute/runtime/CL/CLTensor.h"
40 #endif /* ARM_COMPUTE_CL */
42 #include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
43 #endif /* ARM_COMPUTE_GC */
/** Signature of an example to run
 *
 * @param[in] argc Number of command line arguments
 * @param[in] argv Command line arguments
 */
using example = void(int argc, const char **argv);
/** Run an example and handle the potential exceptions it throws
 *
 * @param[in] argc Number of command line arguments
 * @param[in] argv Command line arguments
 * @param[in] func Pointer to the function containing the code to run
 *
 * @return Exit status — presumably 0 on success, non-zero on failure (TODO confirm against the implementation)
 */
int run_example(int argc, const char **argv, example &func);
/** Draw a RGB rectangular window for the detected object
 *
 * @param[in, out] tensor Input tensor where the rectangle will be drawn on. Format supported: RGB888
 * @param[in]      rect   Geometry of the rectangular window
 * @param[in]      r      Red colour to use
 * @param[in]      g      Green colour to use
 * @param[in]      b      Blue colour to use
 */
void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b);
/** Parse the ppm header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first pixel stored in the ppm file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return The width, height and max value stored in the header of the PPM file
 */
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs);
/** Parse the npy header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first pixel stored in the npy file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return A tuple with the shape (sizes of every dimension), the fortran-order flag
 *         and the element typestring read from the header of the NPY file
 */
std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs);
101 /** Obtain numpy type string from DataType.
103 * @param[in] data_type Data type.
105 * @return numpy type string.
107 inline std::string get_typestring(DataType data_type)
110 const unsigned int i = 1;
111 const char *c = reinterpret_cast<const char *>(&i);
112 std::string endianness;
115 endianness = std::string("<");
119 endianness = std::string(">");
121 const std::string no_endianness("|");
126 return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
128 return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
130 return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
132 return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
134 return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
136 return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
138 return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
140 return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
142 return endianness + "f" + support::cpp11::to_string(sizeof(float));
144 return endianness + "f" + support::cpp11::to_string(sizeof(double));
145 case DataType::SIZET:
146 return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
148 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
152 /** Maps a tensor if needed
154 * @param[in] tensor Tensor to be mapped
155 * @param[in] blocking Specified if map is blocking or not
157 template <typename T>
158 inline void map(T &tensor, bool blocking)
160 ARM_COMPUTE_UNUSED(tensor);
161 ARM_COMPUTE_UNUSED(blocking);
/** Unmaps a tensor if needed
 *
 * Generic fallback: plain CPU tensors need no unmapping, so this is a no-op.
 *
 * @param tensor Tensor to be unmapped
 */
template <typename T>
inline void unmap(T &tensor)
{
    // Intentionally empty; suppress the unused-parameter warning.
    ARM_COMPUTE_UNUSED(tensor);
}
174 #ifdef ARM_COMPUTE_CL
175 /** Maps a tensor if needed
177 * @param[in] tensor Tensor to be mapped
178 * @param[in] blocking Specified if map is blocking or not
180 inline void map(CLTensor &tensor, bool blocking)
182 tensor.map(blocking);
185 /** Unmaps a tensor if needed
187 * @param tensor Tensor to be unmapped
189 inline void unmap(CLTensor &tensor)
194 /** Maps a distribution if needed
196 * @param[in] distribution Distribution to be mapped
197 * @param[in] blocking Specified if map is blocking or not
199 inline void map(CLDistribution1D &distribution, bool blocking)
201 distribution.map(blocking);
204 /** Unmaps a distribution if needed
206 * @param distribution Distribution to be unmapped
208 inline void unmap(CLDistribution1D &distribution)
210 distribution.unmap();
212 #endif /* ARM_COMPUTE_CL */
214 #ifdef ARM_COMPUTE_GC
215 /** Maps a tensor if needed
217 * @param[in] tensor Tensor to be mapped
218 * @param[in] blocking Specified if map is blocking or not
220 inline void map(GCTensor &tensor, bool blocking)
222 tensor.map(blocking);
225 /** Unmaps a tensor if needed
227 * @param tensor Tensor to be unmapped
229 inline void unmap(GCTensor &tensor)
233 #endif /* ARM_COMPUTE_GC */
235 /** Class to load the content of a PPM file into an Image
241 : _fs(), _width(0), _height(0)
/** Open a PPM file and reads its metadata (Width, height)
 *
 * @param[in] ppm_filename File to open
 */
void open(const std::string &ppm_filename)
    // Opening twice is a programming error.
    ARM_COMPUTE_ERROR_ON(is_open());
    // Turn stream errors into std::ifstream::failure exceptions.
    _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    _fs.open(ppm_filename, std::ios::in | std::ios::binary);
    unsigned int max_val = 0;
    std::tie(_width, _height, max_val) = parse_ppm_header(_fs);
    // Only 8-bit-per-channel PPMs are supported (maxval < 256).
    ARM_COMPUTE_ERROR_ON_MSG(max_val >= 256, "2 bytes per colour channel not supported in file %s", ppm_filename.c_str());
    catch(const std::ifstream::failure &e)
    ARM_COMPUTE_ERROR("Accessing %s: %s", ppm_filename.c_str(), e.what());
/** Return true if a PPM file is currently open
 *
 * @return True if a PPM file is currently open
 */
    return _fs.is_open();
/** Initialise an image's metadata with the dimensions of the PPM file currently open
 *
 * @param[out] image  Image to initialise
 * @param[in]  format Format to use for the image (Must be RGB888 or U8)
 */
template <typename T>
void init_image(T &image, arm_compute::Format format)
    ARM_COMPUTE_ERROR_ON(!is_open());
    ARM_COMPUTE_ERROR_ON(format != arm_compute::Format::RGB888 && format != arm_compute::Format::U8);
    // Use the size of the input PPM image
    arm_compute::TensorInfo image_info(_width, _height, format);
    image.allocator()->init(image_info);
/** Fill an image with the content of the currently open PPM file.
 *
 * @note If the image is a CLImage, the function maps and unmaps the image
 *
 * @param[in,out] image Image to fill (Must be allocated, and of matching dimensions with the opened PPM).
 */
template <typename T>
void fill_image(T &image)
    ARM_COMPUTE_ERROR_ON(!is_open());
    ARM_COMPUTE_ERROR_ON(image.info()->dimension(0) != _width || image.info()->dimension(1) != _height);
    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&image, arm_compute::Format::U8, arm_compute::Format::RGB888);
    // Map buffer if creating a CLTensor/GCTensor
    // Check if the file is large enough to fill the image:
    // measure bytes remaining by seeking to end and restoring the position.
    const size_t current_position = _fs.tellg();
    _fs.seekg(0, std::ios_base::end);
    const size_t end_position = _fs.tellg();
    _fs.seekg(current_position, std::ios_base::beg);
    ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < image.info()->tensor_shape().total_size() * image.info()->element_size(),
                             "Not enough data in file");
    ARM_COMPUTE_UNUSED(end_position);
    switch(image.info()->format())
        case arm_compute::Format::U8:
            // We need to convert the data from RGB to grayscale:
            // Iterate through every pixel of the image
            arm_compute::Window window;
            window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
            window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
            arm_compute::Iterator out(&image, window);
            unsigned char red   = 0;
            unsigned char green = 0;
            unsigned char blue  = 0;
            arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                // ITU-R BT.709 luma coefficients for the RGB->grayscale conversion.
                *out.ptr() = 0.2126f * red + 0.7152f * green + 0.0722f * blue;
        case arm_compute::Format::RGB888:
            // There is no format conversion needed: we can simply copy the content of the input file to the image one row at the time.
            // Create a vertical window to iterate through the image's rows:
            arm_compute::Window window;
            window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
            arm_compute::Iterator out(&image, window);
            arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                // Copy one row from the input file to the current row of the image:
                _fs.read(reinterpret_cast<std::fstream::char_type *>(out.ptr()), _width * image.info()->element_size());
            ARM_COMPUTE_ERROR("Unsupported format");
    // Unmap buffer if creating a CLTensor/GCTensor
    catch(const std::ifstream::failure &e)
    ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
/** Fill a tensor with 3 planes (one for each channel) with the content of the currently open PPM file.
 *
 * @note If the image is a CLImage, the function maps and unmaps the image
 *
 * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened PPM). Data types supported: U8/F32
 * @param[in]     bgr    (Optional) Fill the first plane with blue channel (default = false)
 */
template <typename T>
void fill_planar_tensor(T &tensor, bool bgr = false)
    ARM_COMPUTE_ERROR_ON(!is_open());
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32);
    ARM_COMPUTE_ERROR_ON(tensor.info()->dimension(0) != _width || tensor.info()->dimension(1) != _height || tensor.info()->dimension(2) != 3);
    // Map buffer if creating a CLTensor
    // Check if the file is large enough to fill the image.
    // Note: the file stores one byte per element, hence no element_size() factor here.
    const size_t current_position = _fs.tellg();
    _fs.seekg(0, std::ios_base::end);
    const size_t end_position = _fs.tellg();
    _fs.seekg(current_position, std::ios_base::beg);
    ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size(),
                             "Not enough data in file");
    ARM_COMPUTE_UNUSED(end_position);
    // Iterate through every pixel of the image
    arm_compute::Window window;
    window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
    window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
    window.set(arm_compute::Window::DimZ, arm_compute::Window::Dimension(0, 1, 1));
    arm_compute::Iterator out(&tensor, window);
    unsigned char red   = 0;
    unsigned char green = 0;
    unsigned char blue  = 0;
    // Byte offset between consecutive channel planes (R, G, B written to planes 0/1/2).
    size_t stride_z = tensor.info()->strides_in_bytes()[2];
    arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
        switch(tensor.info()->data_type())
            case arm_compute::DataType::U8:
                // Swap R and B planes when bgr is requested.
                *(out.ptr() + 0 * stride_z) = bgr ? blue : red;
                *(out.ptr() + 1 * stride_z) = green;
                *(out.ptr() + 2 * stride_z) = bgr ? red : blue;
            case arm_compute::DataType::F32:
                *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red);
                *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green);
                *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue);
                ARM_COMPUTE_ERROR("Unsupported data type");
    // Unmap buffer if creating a CLTensor
    catch(const std::ifstream::failure &e)
    ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
/** Return the width of the currently open PPM file.
 *
 * @return Width of the open PPM file in pixels.
 */
unsigned int width() const
/** Return the height of the currently open PPM file.
 *
 * @return Height of the open PPM file in pixels.
 */
unsigned int height() const
// Image dimensions parsed from the PPM header; zero until open() succeeds.
unsigned int _width, _height;
480 : _fs(), _shape(), _fortran_order(false), _typestring()
/** Open a NPY file and reads its metadata
 *
 * @param[in] npy_filename File to open
 */
void open(const std::string &npy_filename)
    // Opening twice is a programming error.
    ARM_COMPUTE_ERROR_ON(is_open());
    // Turn stream errors into std::ifstream::failure exceptions.
    _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    _fs.open(npy_filename, std::ios::in | std::ios::binary);
    std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs);
    catch(const std::ifstream::failure &e)
    ARM_COMPUTE_ERROR("Accessing %s: %s", npy_filename.c_str(), e.what());
/** Return true if a NPY file is currently open
 *
 * @return True if a NPY file is currently open
 */
    return _fs.is_open();
/** Return true if a NPY file is in fortran order
 *
 * @return True if the open NPY file stores its data in fortran (column-major) order
 */
    return _fortran_order;
/** Initialise the tensor's metadata with the dimensions of the NPY file currently open
 *
 * @param[out] tensor Tensor to initialise
 * @param[in]  dt     Data type to use for the tensor (only F32 supported)
 */
template <typename T>
void init_tensor(T &tensor, arm_compute::DataType dt)
    ARM_COMPUTE_ERROR_ON(!is_open());
    ARM_COMPUTE_ERROR_ON(dt != arm_compute::DataType::F32);
    // Use the size of the input NPY tensor
    shape.set_num_dimensions(_shape.size());
    for(size_t i = 0; i < _shape.size(); ++i)
        // Copy each dimension read from the NPY header into the tensor shape.
        shape.set(i, _shape.at(i));
    arm_compute::TensorInfo tensor_info(shape, 1, dt);
    tensor.allocator()->init(tensor_info);
/** Fill a tensor with the content of the currently open NPY file.
 *
 * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
 *
 * @param[in,out] tensor Tensor to fill (Must be allocated, and of matching dimensions with the opened NPY).
 */
template <typename T>
void fill_tensor(T &tensor)
    ARM_COMPUTE_ERROR_ON(!is_open());
    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::DataType::F32);
    // Map buffer if creating a CLTensor
    // Check if the file is large enough to fill the tensor:
    // measure bytes remaining by seeking to end and restoring the position.
    const size_t current_position = _fs.tellg();
    _fs.seekg(0, std::ios_base::end);
    const size_t end_position = _fs.tellg();
    _fs.seekg(current_position, std::ios_base::beg);
    ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(),
                             "Not enough data in file");
    ARM_COMPUTE_UNUSED(end_position);
    // Check if the typestring matches the given one
    std::string expect_typestr = get_typestring(tensor.info()->data_type());
    ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch");
    // Validate tensor shape
    ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.shape().num_dimensions(), "Tensor ranks mismatch");
    // Compare dimensions as stored (C order branch — presumably guarded by _fortran_order; TODO confirm)
    for(size_t i = 0; i < _shape.size(); ++i)
        ARM_COMPUTE_ERROR_ON_MSG(tensor.shape()[i] != _shape[i], "Tensor dimensions mismatch");
    // Compare dimensions reversed (fortran order branch)
    for(size_t i = 0; i < _shape.size(); ++i)
        ARM_COMPUTE_ERROR_ON_MSG(tensor.shape()[i] != _shape[_shape.size() - i - 1], "Tensor dimensions mismatch");
    switch(tensor.info()->data_type())
        case arm_compute::DataType::F32:
            if(tensor.info()->padding().empty())
                // If tensor has no padding read directly from stream.
                _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size());
                // If tensor has padding accessing tensor elements through execution window.
                window.use_tensor_dimensions(tensor.info()->tensor_shape());
                execute_window_loop(window, [&](const Coordinates & id)
                    // Read one element at a time, skipping padding bytes.
                    _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(id)), tensor.info()->element_size());
            ARM_COMPUTE_ERROR("Unsupported data type");
    // Unmap buffer if creating a CLTensor
    catch(const std::ifstream::failure &e)
    ARM_COMPUTE_ERROR("Loading NPY file: %s", e.what());
std::vector<unsigned long> _shape;      // Dimensions read from the NPY header
std::string                _typestring; // Element typestring read from the NPY header
/** Template helper function to save a tensor image to a PPM file.
 *
 * @note Only U8 and RGB888 formats supported.
 * @note Only works with 2D tensors.
 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
 *
 * @param[in] tensor       The tensor to save as PPM file
 * @param[in] ppm_filename Filename of the file to create.
 */
template <typename T>
void save_to_ppm(T &tensor, const std::string &ppm_filename)
    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::Format::RGB888, arm_compute::Format::U8);
    ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
    // Turn stream errors into std::ofstream::failure exceptions.
    fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
    fs.open(ppm_filename, std::ios::out | std::ios::binary);
    const unsigned int width  = tensor.info()->tensor_shape()[0];
    const unsigned int height = tensor.info()->tensor_shape()[1];
    // Binary PPM (P6) header: magic, dimensions and maxval.
    << width << " " << height << " 255\n";
    // Map buffer if creating a CLTensor/GCTensor
    switch(tensor.info()->format())
        case arm_compute::Format::U8:
            arm_compute::Window window;
            window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
            window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
            arm_compute::Iterator in(&tensor, window);
            arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                const unsigned char value = *in.ptr();
                // Expand grayscale to RGB by repeating the value three times.
                fs << value << value << value;
        case arm_compute::Format::RGB888:
            arm_compute::Window window;
            // Step DimX by the full width so the lambda writes a whole row per iteration.
            window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, width));
            window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
            arm_compute::Iterator in(&tensor, window);
            arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                fs.write(reinterpret_cast<std::fstream::char_type *>(in.ptr()), width * tensor.info()->element_size());
            ARM_COMPUTE_ERROR("Unsupported format");
    // Unmap buffer if creating a CLTensor/GCTensor
    catch(const std::ofstream::failure &e)
    ARM_COMPUTE_ERROR("Writing %s: (%s)", ppm_filename.c_str(), e.what());
/** Template helper function to save a tensor image to a NPY file.
 *
 * @note Only F32 data type supported.
 * @note Only works with 2D tensors.
 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
 *
 * @param[in] tensor        The tensor to save as NPY file
 * @param[in] npy_filename  Filename of the file to create.
 * @param[in] fortran_order If true, save matrix in fortran order.
 */
template <typename T>
void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
    ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
    // Turn stream errors into std::ofstream::failure exceptions.
    fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
    fs.open(npy_filename, std::ios::out | std::ios::binary);
    const unsigned int width  = tensor.info()->tensor_shape()[0];
    const unsigned int height = tensor.info()->tensor_shape()[1];
    std::vector<npy::ndarray_len_t> shape(2);
    // NPY shape ordering differs per layout — presumably these two assignments
    // sit in the branches of a fortran_order check; TODO confirm.
    shape[0] = height, shape[1] = width;
    shape[0] = width, shape[1] = height;
    // Map buffer if creating a CLTensor
    switch(tensor.info()->data_type())
        case arm_compute::DataType::F32:
            std::vector<float> tmp; /* Used only to get the typestring */
            npy::Typestring typestring_o{ tmp };
            std::string typestring = typestring_o.str();
            // NOTE(review): this opens npy_filename a second time ('fs' above is
            // already open on the same path) — looks redundant; verify intent.
            std::ofstream stream(npy_filename, std::ofstream::binary);
            npy::write_header(stream, typestring, fortran_order, shape);
            arm_compute::Window window;
            window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
            window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
            arm_compute::Iterator in(&tensor, window);
            arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                // Write one float element per iteration.
                stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
            ARM_COMPUTE_ERROR("Unsupported format");
    // Unmap buffer if creating a CLTensor
    catch(const std::ofstream::failure &e)
    ARM_COMPUTE_ERROR("Writing %s: (%s)", npy_filename.c_str(), e.what());
/** Load the tensor with pre-trained data from a binary file
 *
 * @param[in] tensor   The tensor to be filled. Data type supported: F32.
 * @param[in] filename Filename of the binary file to load from.
 */
template <typename T>
void load_trained_data(T &tensor, const std::string &filename)
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
    // Turn stream errors into exceptions.
    fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
    fs.open(filename, std::ios::in | std::ios::binary);
        throw std::runtime_error("Could not load binary data: " + filename);
    // Map buffer if creating a CLTensor/GCTensor
    // Collapse DimX: each loop iteration reads a whole contiguous row.
    window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, 1, 1));
    for(unsigned int d = 1; d < tensor.info()->num_dimensions(); ++d)
        window.set(d, Window::Dimension(0, tensor.info()->tensor_shape()[d], 1));
    arm_compute::Iterator in(&tensor, window);
    execute_window_loop(window, [&](const Coordinates & id)
        // Read one full row (dimension 0) per iteration.
        fs.read(reinterpret_cast<std::fstream::char_type *>(in.ptr()), tensor.info()->tensor_shape()[0] * tensor.info()->element_size());
    // Unmap buffer if creating a CLTensor/GCTensor
    catch(const std::ofstream::failure &e)
    ARM_COMPUTE_ERROR("Writing %s: (%s)", filename.c_str(), e.what());
/** Fill a 2D tensor with uniformly-distributed random values.
 *
 * @param[in,out] tensor      Tensor to fill. Data type supported: F32.
 * @param[in]     lower_bound Lower bound of the uniform distribution
 * @param[in]     upper_bound Upper bound of the uniform distribution
 */
template <typename T>
void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
    // Non-deterministic seed for the Mersenne Twister engine.
    std::random_device rd;
    std::mt19937 gen(rd());
    TensorShape shape(tensor.info()->dimension(0), tensor.info()->dimension(1));
    window.set(Window::DimX, Window::Dimension(0, shape.x(), 1));
    window.set(Window::DimY, Window::Dimension(0, shape.y(), 1));
    Iterator it(&tensor, window);
    switch(tensor.info()->data_type())
        case arm_compute::DataType::F32:
            std::uniform_real_distribution<float> dist(lower_bound, upper_bound);
            execute_window_loop(window, [&](const Coordinates & id)
                *reinterpret_cast<float *>(it.ptr()) = dist(gen);
            ARM_COMPUTE_ERROR("Unsupported format");
877 template <typename T>
878 void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt)
880 dst.allocator()->init(TensorInfo(TensorShape(src1.info()->dimension(0), src0.info()->dimension(1)), 1, dt));
884 } // namespace arm_compute
885 #endif /* __UTILS_UTILS_H__*/