/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
#include "arm_compute/core/ITensor.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#include "libnpy/npy.hpp"
+#pragma GCC diagnostic pop
+
#include <cctype>
#include <fstream>
#include <limits>
{
return RawTensor(find_or_create_raw_tensor(name, format, channel));
}
+
+namespace detail
+{
+void validate_npy_header(std::ifstream &stream, const std::string &expect_typestr, const TensorShape &expect_shape)
+{
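+    // The checks below are assertions that may compile to nothing when asserts are
+    // disabled, so explicitly mark the parameters as used to keep -Wunused-parameter quiet.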
+ ARM_COMPUTE_UNUSED(expect_typestr);
+ ARM_COMPUTE_UNUSED(expect_shape);
+
+ std::string header = npy::read_header(stream);
+
+ // Parse header
+ std::vector<unsigned long> shape;
+ bool fortran_order = false;
+ std::string typestr;
+ npy::parse_header(header, typestr, fortran_order, shape);
+
+ // Check if the typestring matches the given one
+ ARM_COMPUTE_ERROR_ON_MSG(typestr != expect_typestr, "Typestrings mismatch");
+
+ // Validate tensor shape
+ ARM_COMPUTE_ERROR_ON_MSG(shape.size() != expect_shape.num_dimensions(), "Tensor ranks mismatch");
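+    // The npy header always stores the logical shape. For Fortran-ordered payloads the
+    // first dimension is the contiguous one, matching TensorShape's innermost-first
+    // convention; for the default C order the dimensions are compared in reverse.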
+ if(fortran_order)
+ {
+ for(size_t i = 0; i < shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(expect_shape[i] != shape[i], "Tensor dimensions mismatch");
+ }
+ }
+ else
+ {
+ for(size_t i = 0; i < shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(expect_shape[i] != shape[shape.size() - i - 1], "Tensor dimensions mismatch");
+ }
+ }
+}
+} // namespace detail
} // namespace test
} // namespace arm_compute
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/Random.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#include "libnpy/npy.hpp"
-#pragma GCC diagnostic pop
#include "tests/RawTensor.h"
#include "tests/TensorCache.h"
#include "tests/Utils.h"
});
return converted;
}
+
+/** Read the npy header and check that the payload is suitable for the specified type and shape
+ *
+ * @param[in] stream ifstream of the npy file
+ * @param[in] expect_typestr Expected typestr
+ * @param[in] expect_shape Shape of tensor expected to receive the data
+ *
+ * @note Advances stream to the beginning of the data payload
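+ *
+ * Example call (sketch; get_typestring() maps the tensor's DataType to an npy typestr):
+ *   detail::validate_npy_header(stream, get_typestring(tensor.data_type()), tensor.shape());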
+ */
+void validate_npy_header(std::ifstream &stream, const std::string &expect_typestr, const TensorShape &expect_shape);
} // namespace detail
template <typename T, typename D>
#endif /* _WIN32 */
const std::string path = _library_path + path_separator + name;
- std::vector<unsigned long> shape;
-
// Open file
std::ifstream stream(path, std::ios::in | std::ios::binary);
if(!stream.good())
{
throw framework::FileNotFound("Could not load npy file: " + path);
}
- std::string header = npy::read_header(stream);
-
- // Parse header
- bool fortran_order = false;
- std::string typestr;
- npy::parse_header(header, typestr, fortran_order, shape);
- // Check if the typestring matches the given one
- std::string expect_typestr = get_typestring(tensor.data_type());
- ARM_COMPUTE_ERROR_ON_MSG(typestr != expect_typestr, "Typestrings mismatch");
-
- // Validate tensor shape
- ARM_COMPUTE_ERROR_ON_MSG(shape.size() != tensor.shape().num_dimensions(), "Tensor ranks mismatch");
- if(fortran_order)
- {
- for(size_t i = 0; i < shape.size(); ++i)
- {
- ARM_COMPUTE_ERROR_ON_MSG(tensor.shape()[i] != shape[i], "Tensor dimensions mismatch");
- }
- }
- else
- {
- for(size_t i = 0; i < shape.size(); ++i)
- {
- ARM_COMPUTE_ERROR_ON_MSG(tensor.shape()[i] != shape[shape.size() - i - 1], "Tensor dimensions mismatch");
- }
- }
+    detail::validate_npy_header(stream, get_typestring(tensor.data_type()), tensor.shape());
// Read data
if(tensor.padding().empty())
#include "arm_compute/runtime/Scheduler.h"
#include "support/MemorySupport.h"
#include "tests/framework/ParametersLibrary.h"
+#include "tests/framework/TestFilter.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/runtime/CL/CLRuntimeContext.h"
std::unique_ptr<InstrumentsInfo> instruments_info;
Framework::Framework()
+ : _test_filter(nullptr)
{
_available_instruments.emplace(std::pair<InstrumentType, ScaleFactor>(InstrumentType::WALL_CLOCK_TIMESTAMPS, ScaleFactor::NONE), Instrument::make_instrument<WallClockTimestamps, ScaleFactor::NONE>);
_available_instruments.emplace(std::pair<InstrumentType, ScaleFactor>(InstrumentType::WALL_CLOCK_TIMESTAMPS, ScaleFactor::TIME_MS),
void Framework::init(const FrameworkConfig &config)
{
- _test_filter = TestFilter(config.mode, config.name_filter, config.id_filter);
+ _test_filter.reset(new TestFilter(config.mode, config.name_filter, config.id_filter));
_num_iterations = config.num_iterations;
_log_level = config.log_level;
_cooldown_sec = config.cooldown_sec;
const std::string test_case_name = test_factory->name();
const TestInfo test_info{ id, test_case_name, test_factory->mode(), test_factory->status() };
- if(_test_filter.is_selected(test_info))
+ if(_test_filter->is_selected(test_info))
{
#ifdef ARM_COMPUTE_CL
// Every 100 tests, reset the OpenCL context to release the allocated memory
{
TestInfo test_info{ id, factory->name(), factory->mode(), factory->status() };
- if(_test_filter.is_selected(test_info))
+ if(_test_filter->is_selected(test_info))
{
ids.emplace_back(std::move(test_info));
}
#include "Profiler.h"
#include "TestCase.h"
#include "TestCaseFactory.h"
-#include "TestFilter.h"
#include "TestResult.h"
#include "Utils.h"
#include "instruments/Instruments.h"
#include <memory>
#include <numeric>
#include <ostream>
-#include <regex>
#include <set>
#include <sstream>
#include <string>
-#include <tuple>
#include <vector>
namespace arm_compute
{
namespace framework
{
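+// Forward declaration: the test filter is only held through a pointer, so the full
+// TestFilter definition (and the headers it drags in) can stay out of this header.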
+class TestFilter;
+
/** Framework configuration structure */
struct FrameworkConfig
{
std::map<InstrumentsDescription, create_function *> _available_instruments{};
std::set<framework::InstrumentsDescription> _instruments{ std::pair<InstrumentType, ScaleFactor>(InstrumentType::NONE, ScaleFactor::NONE) };
- TestFilter _test_filter{};
+ std::unique_ptr<TestFilter> _test_filter;
LogLevel _log_level{ LogLevel::ALL };
const TestInfo *_current_test_info{ nullptr };
TestResult *_current_test_result{ nullptr };