armnn::BackendOptions option("GpuAcc", {{"TuningLevel", atoi(options_values[i])}});
options.AddBackendOption(option);
}
+ else if (std::string(options_keys[i]) == std::string("gpu-mlgo-tuning-file"))
+ {
+ armnn::BackendOptions option("GpuAcc", {{"MLGOTuningFilePath", std::string(options_values[i])}});
+ options.AddBackendOption(option);
+ }
else if (std::string(options_keys[i]) == std::string("gpu-tuning-file"))
{
armnn::BackendOptions option("GpuAcc", {{"TuningFile", std::string(options_values[i])}});
struct ClBackendContext::ClContextControlWrapper
{
ClContextControlWrapper(arm_compute::CLTuner* tuner,
+ arm_compute::CLGEMMHeuristicsHandle* heuristicsHandle,
bool profilingEnabled)
- : m_ClContextControl(tuner, profilingEnabled)
+ : m_ClContextControl(tuner, heuristicsHandle, profilingEnabled)
{}
bool Sync()
bool kernelProfiling = options.m_EnableGpuProfiling;
arm_compute::CLTuner* tuner = nullptr;
+ arm_compute::CLGEMMHeuristicsHandle* mlgoTuner = nullptr;
bool useLegacyTunerAPI = options.m_GpuAccTunedParameters.get() != nullptr;
if (useLegacyTunerAPI)
{
{
tuningLevel = ParseTuningLevel(value, defaultTuningLevel);
}
+ else if (name == "MLGOTuningFilePath")
+ {
+ m_MLGOTuningFile = ParseFile(value, "");
+ }
});
// Create the tuner, in tuning mode initially.
ARMNN_LOG(warning) << "Could not load GpuAcc tuner data file.";
}
}
+
+        // Load the MLGO (machine-learned GEMM heuristics) data if a file was configured.
+        // Failure is non-fatal: we simply run without MLGO heuristics.
+        if (!m_MLGOTuningFile.empty())
+        {
+            try
+            {
+                // BUGFIX: log the MLGO file path, not the CL tuner file path (m_TuningFile).
+                ARMNN_LOG(info) << "Loading Gpu MLGO tuning data from file: " << m_MLGOTuningFile;
+                if (m_MLGOTuner.reload_from_file(m_MLGOTuningFile.c_str()))
+                {
+                    mlgoTuner = &m_MLGOTuner;
+                }
+            }
+            catch (const std::exception&)
+            {
+                ARMNN_LOG(warning) << "Could not load GpuAcc MLGO tuner data file.";
+            }
+        }
+
+
tuner = m_Tuner.get();
}
m_ClContextControlWrapper = std::make_unique<ClContextControlWrapper>(
tuner,
+ mlgoTuner,
kernelProfiling
- );
+ );
}
bool ClBackendContext::BeforeLoadNetwork(NetworkId)
#include <mutex>
#include <arm_compute/runtime/CL/CLTuner.h>
+#include <arm_compute/runtime/CL/CLGEMMHeuristicsHandle.h>
namespace armnn
{
std::unique_ptr<arm_compute::CLTuner> m_Tuner;
std::string m_TuningFile;
+
+protected:
+ arm_compute::CLGEMMHeuristicsHandle m_MLGOTuner;
+ std::string m_MLGOTuningFile;
};
} // namespace armnn
\ No newline at end of file
{
ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
+ arm_compute::CLGEMMHeuristicsHandle* heuristicsHandle,
bool profilingEnabled)
: m_Tuner(tuner)
+ , m_HeuristicsHandle(heuristicsHandle)
, m_ProfilingEnabled(profilingEnabled)
{
// Ignore m_ProfilingEnabled if unused to avoid compiling problems when ArmCompute is disabled.
// Note the first argument (path to cl source code) will be ignored as they should be embedded in the armcompute.
arm_compute::CLKernelLibrary::get().init(".", context, device);
- arm_compute::CLScheduler::get().init(context, commandQueue, device, m_Tuner);
+ arm_compute::CLScheduler::get().init(context, commandQueue, device, m_Tuner, m_HeuristicsHandle);
}
void ClContextControl::ClearClCache()
#include "armnn/IRuntime.hpp"
#include <arm_compute/runtime/CL/CLTuner.h>
+#include <arm_compute/runtime/CL/CLGEMMHeuristicsHandle.h>
namespace armnn
{
public:
ClContextControl(arm_compute::CLTuner* = nullptr,
+ arm_compute::CLGEMMHeuristicsHandle* = nullptr,
bool profilingEnabled = false);
virtual ~ClContextControl();
void DoLoadOpenClRuntime(bool updateTunedParameters);
arm_compute::CLTuner* m_Tuner;
+ arm_compute::CLGEMMHeuristicsHandle* m_HeuristicsHandle;
bool m_ProfilingEnabled;
};
TuningLevel m_TuningLevel;
arm_compute::CLTuner m_Tuner;
+ arm_compute::CLGEMMHeuristicsHandle m_HeuristicsHandle;
};
} // namespace armnn
{
// Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case
ClContextControlFixtureBase()
- : m_ClContextControl(nullptr, ProfilingEnabled) {}
+ : m_ClContextControl(nullptr, nullptr, ProfilingEnabled) {}
armnn::ClContextControl m_ClContextControl;
};
#include <test/GraphUtils.hpp>
#include <cl/ClWorkloadFactory.hpp>
+#include <cl/ClBackendContext.hpp>
+
+#include <Filesystem.hpp>
+
#include <boost/test/unit_test.hpp>
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}
+BOOST_AUTO_TEST_CASE(CheckMLGOTuningFile)
+{
+    // Exposes the protected MLGO members of ClBackendContext so the test can
+    // drive CLGEMMHeuristicsHandle::reload_from_file() directly and observe
+    // whether the backend-option plumbing delivered the configured path.
+    class ClBackendContextTestClass : public armnn::ClBackendContext
+    {
+    public:
+        ClBackendContextTestClass(const armnn::IRuntime::CreationOptions &options) : ClBackendContext(options)
+        {}
+
+        bool call_reload_from_file()
+        {
+            return m_MLGOTuner.reload_from_file(m_MLGOTuningFile);
+        }
+    };
+
+    // A minimal but well-formed MLGO heuristics file (header, heuristics table,
+    // and one heuristic tree per table entry).
+    const std::string validText{
+        "<header>\n"
+        "gemm-version, [1,2,1]\n"
+        "ip-type,gpu\n"
+        "</header>\n"
+        "<heuristics-table>\n"
+        "0, g71 , 8, f32, best-performance, static, gemm-type, [m,n,k,n]\n"
+        "1, g71 , 8, f32, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
+        "2, g71 , 8, f32, best-performance, static, gemm-config-reshaped, [m,n,k,n]\n"
+        "3, g71 , 8, qasymm8, best-performance, static, gemm-type, [m,n,k,n]\n"
+        "4, g71 , 8, qasymm8, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
+        "5, g71 , 8, qasymm8, best-performance, static, gemm-config-native, [m,n,k,n]\n"
+        "</heuristics-table>\n"
+        "<heuristic, 0>\n"
+        "b , 0, var, r_mn, >=, num, 2., 1, 2\n"
+        "l , 1, gemm-type, reshaped\n"
+        "l , 2, gemm-type, reshaped-only-rhs\n"
+        "</heuristic>\n"
+        "<heuristic, 1>\n"
+        "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
+        "</heuristic>\n"
+        "<heuristic, 2>\n"
+        "l ,0,gemm-config-reshaped,[4,2,8,16,16,1,0,1,0]\n"
+        "</heuristic>\n"
+        "<heuristic, 3>\n"
+        "l , 0, gemm-type, native\n"
+        "</heuristic>\n"
+        "<heuristic, 4>\n"
+        "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
+        "</heuristic>\n"
+        "<heuristic, 5>\n"
+        "l ,0,gemm-config-native,[4,2,8]\n"
+        "</heuristic>\n"};
+
+    // Deliberately unparseable content to exercise the failure path.
+    const std::string invalidText{"ʕノ•ᴥ•ʔノ ︵ ┻━┻"};
+
+    fs::path validFile = armnnUtils::Filesystem::NamedTempFile("validFile.mlgo");
+    fs::path invalidFile = armnnUtils::Filesystem::NamedTempFile("invalidFile.mlgo");
+
+    try
+    {
+        // NOTE(review): ofstream only throws if exceptions() is enabled, so a
+        // failed open would previously pass silently — check the streams too.
+        std::ofstream ofs1{validFile};
+        ofs1 << validText << std::endl;
+        ofs1.close();
+
+        std::ofstream ofs2{invalidFile};
+        ofs2 << invalidText << std::endl;
+        ofs2.close();
+
+        if (!ofs1 || !ofs2)
+        {
+            std::cerr << "Unable to write test files [" << validFile.c_str()
+                      << "] / [" << invalidFile.c_str() << "]" << std::endl;
+            BOOST_TEST(false);
+        }
+    }
+    catch (const std::exception &e)
+    {
+        // BUGFIX: report both candidate paths — the failure may concern either file.
+        std::cerr << "Unable to write test files [" << validFile.c_str()
+                  << "] / [" << invalidFile.c_str() << "] : " << e.what() << std::endl;
+        BOOST_TEST(false);
+    }
+
+    // 1) A valid MLGO file must load successfully.
+    armnn::IRuntime::CreationOptions creationOptions1;
+    armnn::BackendOptions validOptions
+    {
+        "GpuAcc",
+        {
+            {"MLGOTuningFilePath", validFile.c_str()}
+        }
+    };
+
+    creationOptions1.m_BackendOptions.emplace_back(validOptions);
+    ClBackendContextTestClass clBackendContext1(creationOptions1);
+    BOOST_TEST(clBackendContext1.call_reload_from_file());
+
+    // 2) A file with garbage content must fail to load.
+    armnn::BackendOptions invalidOptions
+    {
+        "GpuAcc",
+        {
+            {"MLGOTuningFilePath", invalidFile.c_str()}
+        }
+    };
+
+    armnn::IRuntime::CreationOptions creationOptions2;
+    creationOptions2.m_BackendOptions.emplace_back(invalidOptions);
+    ClBackendContextTestClass clBackendContext2(creationOptions2);
+    BOOST_TEST(clBackendContext2.call_reload_from_file() == false);
+
+    // 3) A path that does not exist must fail to load.
+    armnn::BackendOptions invalidPathOptions
+    {
+        "GpuAcc",
+        {
+            {"MLGOTuningFilePath", "not_a_real_file_path"}
+        }
+    };
+
+    armnn::IRuntime::CreationOptions creationOptions3;
+    creationOptions3.m_BackendOptions.emplace_back(invalidPathOptions);
+    ClBackendContextTestClass clBackendContext3(creationOptions3);
+    BOOST_TEST(clBackendContext3.call_reload_from_file() == false);
+
+    // BUGFIX: remove the temp files so repeated runs don't accumulate them on disk.
+    fs::remove(validFile);
+    fs::remove(invalidFile);
+}
+
BOOST_AUTO_TEST_SUITE_END();
// Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case.
// NOTE: Profiling needs to be enabled in ClContextControl to be able to obtain execution
// times from OpenClTimer.
- OpenClFixture() : m_ClContextControl(nullptr, true) {}
+ OpenClFixture() : m_ClContextControl(nullptr, nullptr, true) {}
~OpenClFixture() {}
ClContextControl m_ClContextControl;
inferenceModelParams.m_SaveCachedNetwork = params.m_SaveCachedNetwork;
inferenceModelParams.m_CachedNetworkFilePath = params.m_CachedNetworkFilePath;
inferenceModelParams.m_NumberOfThreads = params.m_NumberOfThreads;
+ inferenceModelParams.m_MLGOTuningFilePath = params.m_MLGOTuningFilePath;
for(const std::string& inputName: params.m_InputNames)
{
double m_ThresholdTime;
int m_TuningLevel;
std::string m_TuningPath;
+ std::string m_MLGOTuningFilePath;
// Ensures that the parameters for ExecuteNetwork fit together
void ValidateParams();
("tuning-path",
"Path to tuning file. Enables use of CL tuning",
- cxxopts::value<std::string>(m_ExNetParams.m_TuningPath));
+ cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
+
+ ("MLGOTuningFilePath",
+ "Path to tuning file. Enables use of CL MLGO tuning",
+ cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath));
m_CxxOptions.add_options("d) Profiling")
("a,enable-external-profiling",
{
{"TuningLevel", m_ExNetParams.m_TuningLevel},
{"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
- {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling}
+ {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
+ {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
}
}
);
bool m_SaveCachedNetwork;
std::string m_CachedNetworkFilePath;
unsigned int m_NumberOfThreads;
+ std::string m_MLGOTuningFilePath;
+
Params()
: m_ComputeDevices{}
, m_SaveCachedNetwork(false)
, m_CachedNetworkFilePath("")
, m_NumberOfThreads(0)
+ , m_MLGOTuningFilePath("")
{}
};
{
{ "FastMathEnabled", params.m_EnableFastMath },
{ "SaveCachedNetwork", params.m_SaveCachedNetwork },
- { "CachedNetworkFilePath", params.m_CachedNetworkFilePath }
+ { "CachedNetworkFilePath", params.m_CachedNetworkFilePath },
+ { "MLGOTuningFilePath", params.m_MLGOTuningFilePath }
});
+
armnn::BackendOptions cpuAcc("CpuAcc",
{
{ "FastMathEnabled", params.m_EnableFastMath },