#include <armnn/Version.hpp>
#include <backendsCommon/BackendRegistry.hpp>
+#include <backendsCommon/IBackendContext.hpp>
#include <iostream>
-#ifdef ARMCOMPUTECL_ENABLED
-#include <arm_compute/core/CL/OpenCL.h>
-#include <arm_compute/core/CL/CLKernelLibrary.h>
-#include <arm_compute/runtime/CL/CLScheduler.h>
-#endif
-
#include <boost/log/trivial.hpp>
#include <boost/polymorphic_cast.hpp>
std::string & errorMessage)
{
IOptimizedNetwork* rawNetwork = inNetwork.release();
+
+ networkIdOut = GenerateNetworkId();
+
+ for (auto&& context : m_BackendContexts)
+ {
+ context.second->BeforeLoadNetwork(networkIdOut);
+ }
+
unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
std::unique_ptr<OptimizedNetwork>(boost::polymorphic_downcast<OptimizedNetwork*>(rawNetwork)),
errorMessage);
return Status::Failure;
}
- networkIdOut = GenerateNetworkId();
-
{
std::lock_guard<std::mutex> lockGuard(m_Mutex);
m_LoadedNetworks[networkIdOut] = std::move(loadedNetwork);
}
+ for (auto&& context : m_BackendContexts)
+ {
+ context.second->AfterLoadNetwork(networkIdOut);
+ }
+
return Status::Success;
}
Status Runtime::UnloadNetwork(NetworkId networkId)
{
-#ifdef ARMCOMPUTECL_ENABLED
- if (arm_compute::CLScheduler::get().context()() != NULL)
+ bool unloadOk = true;
+ for (auto&& context : m_BackendContexts)
{
- // Waits for all queued CL requests to finish before unloading the network they may be using.
- try
- {
- // Coverity fix: arm_compute::CLScheduler::sync() may throw an exception of type cl::Error.
- arm_compute::CLScheduler::get().sync();
- }
- catch (const cl::Error&)
- {
- BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): an error occurred while waiting for "
- "the queued CL requests to finish";
- return Status::Failure;
- }
+ unloadOk &= context.second->BeforeUnloadNetwork(networkId);
+ }
+
+ if (!unloadOk)
+ {
+ BOOST_LOG_TRIVIAL(warning) << "Runtime::UnloadNetwork(): failed to unload "
+ "network with ID: " << networkId << " because BeforeUnloadNetwork failed";
+ return Status::Failure;
}
-#endif
{
std::lock_guard<std::mutex> lockGuard(m_Mutex);
BOOST_LOG_TRIVIAL(warning) << "WARNING: Runtime::UnloadNetwork(): " << networkId << " not found!";
return Status::Failure;
}
+ }
-#ifdef ARMCOMPUTECL_ENABLED
- if (arm_compute::CLScheduler::get().context()() != NULL && m_LoadedNetworks.empty())
- {
- // There are no loaded networks left, so clear the CL cache to free up memory
- m_ClContextControl.ClearClCache();
- }
-#endif
+ for (auto&& context : m_BackendContexts)
+ {
+ context.second->AfterUnloadNetwork(networkId);
}
BOOST_LOG_TRIVIAL(debug) << "Runtime::UnloadNetwork(): Unloaded network with ID: " << networkId;
}
Runtime::Runtime(const CreationOptions& options)
- : m_ClContextControl(options.m_GpuAccTunedParameters.get(),
- options.m_EnableGpuProfiling)
- , m_NetworkIdCounter(0)
+ : m_NetworkIdCounter(0)
, m_DeviceSpec{BackendRegistryInstance().GetBackendIds()}
{
BOOST_LOG_TRIVIAL(info) << "ArmNN v" << ARMNN_VERSION << "\n";
+
+ for (const auto& id : BackendRegistryInstance().GetBackendIds())
+ {
+ // Store backend contexts for the supported ones
+ if (m_DeviceSpec.GetSupportedBackends().count(id) > 0)
+ {
+ auto factoryFun = BackendRegistryInstance().GetFactory(id);
+ auto backend = factoryFun();
+ BOOST_ASSERT(backend.get() != nullptr);
+
+ auto context = backend->CreateBackendContext(options);
+
+ // backends are allowed to return nullptrs if they
+ // don't wish to create a backend specific context
+ if (context)
+ {
+ m_BackendContexts.emplace(std::make_pair(id, std::move(context)));
+ }
+ }
+ }
}
Runtime::~Runtime()
#include <armnn/Tensor.hpp>
#include <armnn/BackendId.hpp>
-#include <cl/ClContextControl.hpp>
-
#include <mutex>
#include <unordered_map>
mutable std::mutex m_Mutex;
std::unordered_map<NetworkId, std::unique_ptr<LoadedNetwork>> m_LoadedNetworks;
-
- ClContextControl m_ClContextControl;
+ std::unordered_map<BackendId, IBackendInternal::IBackendContextPtr> m_BackendContexts;
int m_NetworkIdCounter;
//
#pragma once
-#include "IBackendInternal.hpp"
#include "RegistryCommon.hpp"
-
#include <armnn/Types.hpp>
namespace armnn
{
-
+class IBackendInternal;
+using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
using BackendRegistry = RegistryCommon<IBackendInternal, IBackendInternalUniquePtr>;
BackendRegistry& BackendRegistryInstance();
static const char * Name() { return "IBackend"; }
};
 } // namespace armnn
CpuTensorHandleFwd.hpp
CpuTensorHandle.hpp
IBackendInternal.hpp
+ IBackendContext.hpp
ILayerSupport.cpp
ITensorHandle.hpp
LayerSupportRegistry.cpp
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/IRuntime.hpp>
+#include <memory>
+
+namespace armnn
+{
+
+class IBackendContext
+{
+protected:
+ IBackendContext(const IRuntime::CreationOptions&) {}
+
+public:
+ // Before and after Load network events
+ virtual bool BeforeLoadNetwork(NetworkId networkId) = 0;
+ virtual bool AfterLoadNetwork(NetworkId networkId) = 0;
+
+ // Before and after Unload network events
+ virtual bool BeforeUnloadNetwork(NetworkId networkId) = 0;
+ virtual bool AfterUnloadNetwork(NetworkId networkId) = 0;
+
+ virtual ~IBackendContext() {}
+};
+
+using IBackendContextUniquePtr = std::unique_ptr<IBackendContext>;
+
+} // namespace armnn
#pragma once
#include <armnn/Types.hpp>
+#include <armnn/IRuntime.hpp>
namespace armnn
{
class IWorkloadFactory;
+class IBackendContext;
class IBackendInternal : public IBackend
{
~IBackendInternal() override = default;
using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
+ using IBackendContextPtr = std::unique_ptr<IBackendContext>;
+
virtual IWorkloadFactoryPtr CreateWorkloadFactory() const = 0;
+ virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const = 0;
};
using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
#include <armnn/Types.hpp>
#include <backendsCommon/BackendRegistry.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
#include <boost/test/unit_test.hpp>
list(APPEND armnnClBackend_sources
ClBackend.cpp
ClBackend.hpp
+ ClBackendContext.cpp
+ ClBackendContext.hpp
ClBackendId.hpp
ClContextControl.cpp
ClContextControl.hpp
#include "ClBackend.hpp"
#include "ClBackendId.hpp"
#include "ClWorkloadFactory.hpp"
+#include "ClBackendContext.hpp"
#include <backendsCommon/BackendRegistry.hpp>
return std::make_unique<ClWorkloadFactory>();
}
+IBackendInternal::IBackendContextPtr
+ClBackend::CreateBackendContext(const IRuntime::CreationOptions& options) const
+{
+ return IBackendContextPtr{new ClBackendContext{options}};
+}
+
} // namespace armnn
//
#pragma once
+#include <backendsCommon/IBackendContext.hpp>
#include <backendsCommon/IBackendInternal.hpp>
namespace armnn
static const BackendId& GetIdStatic();
const BackendId& GetId() const override { return GetIdStatic(); }
- IWorkloadFactoryPtr CreateWorkloadFactory() const override;
+ IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory() const override;
+ IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
};
} // namespace armnn
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClBackendContext.hpp"
+#include "ClContextControl.hpp"
+
+#include <boost/log/trivial.hpp>
+
+#ifdef ARMCOMPUTECL_ENABLED
+#include <arm_compute/core/CL/OpenCL.h>
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+#include <arm_compute/runtime/CL/CLScheduler.h>
+#endif
+
+namespace armnn
+{
+
+struct ClBackendContext::ClContextControlWrapper
+{
+ ClContextControlWrapper(IGpuAccTunedParameters* clTunedParameters,
+ bool profilingEnabled)
+ : m_ClContextControl(clTunedParameters, profilingEnabled)
+ {}
+
+ bool Sync()
+ {
+#ifdef ARMCOMPUTECL_ENABLED
+ if (arm_compute::CLScheduler::get().context()() != NULL)
+ {
+ // Waits for all queued CL requests to finish before unloading the network they may be using.
+ try
+ {
+ // Coverity fix: arm_compute::CLScheduler::sync() may throw an exception of type cl::Error.
+ arm_compute::CLScheduler::get().sync();
+ }
+ catch (const cl::Error&)
+ {
+ BOOST_LOG_TRIVIAL(warning) << "WARNING: ClBackendContext::Sync(): an error occurred while waiting for "
+ "the queued CL requests to finish";
+ return false;
+ }
+ }
+#endif
+ return true;
+ }
+
+ void ClearClCache()
+ {
+#ifdef ARMCOMPUTECL_ENABLED
+ if (arm_compute::CLScheduler::get().context()() != NULL)
+ {
+ // There are no loaded networks left, so clear the CL cache to free up memory
+ m_ClContextControl.ClearClCache();
+ }
+#endif
+ }
+
+ ClContextControl m_ClContextControl;
+};
+
+
+ClBackendContext::ClBackendContext(const IRuntime::CreationOptions& options)
+ : IBackendContext(options)
+ , m_ClContextControlWrapper(
+ std::make_unique<ClContextControlWrapper>(options.m_GpuAccTunedParameters.get(),
+ options.m_EnableGpuProfiling))
+{
+}
+
+bool ClBackendContext::BeforeLoadNetwork(NetworkId)
+{
+ return true;
+}
+
+bool ClBackendContext::AfterLoadNetwork(NetworkId networkId)
+{
+ {
+ std::lock_guard<std::mutex> lockGuard(m_Mutex);
+ m_NetworkIds.insert(networkId);
+ }
+ return true;
+}
+
+bool ClBackendContext::BeforeUnloadNetwork(NetworkId)
+{
+ return m_ClContextControlWrapper->Sync();
+}
+
+bool ClBackendContext::AfterUnloadNetwork(NetworkId networkId)
+{
+ bool clearCache = false;
+ {
+ std::lock_guard<std::mutex> lockGuard(m_Mutex);
+ m_NetworkIds.erase(networkId);
+ clearCache = m_NetworkIds.empty();
+ }
+
+ if (clearCache)
+ {
+ m_ClContextControlWrapper->ClearClCache();
+ }
+
+ return true;
+}
+
+ClBackendContext::~ClBackendContext()
+{
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <backendsCommon/IBackendContext.hpp>
+#include <unordered_set>
+#include <mutex>
+
+namespace armnn
+{
+
+class ClBackendContext : public IBackendContext
+{
+public:
+ ClBackendContext(const IRuntime::CreationOptions& options);
+
+ bool BeforeLoadNetwork(NetworkId networkId) override;
+ bool AfterLoadNetwork(NetworkId networkId) override;
+
+ bool BeforeUnloadNetwork(NetworkId networkId) override;
+ bool AfterUnloadNetwork(NetworkId networkId) override;
+
+ ~ClBackendContext() override;
+
+private:
+ std::mutex m_Mutex;
+ struct ClContextControlWrapper;
+ std::unique_ptr<ClContextControlWrapper> m_ClContextControlWrapper;
+
+ std::unordered_set<NetworkId> m_NetworkIds;
+
+};
+
+} // namespace armnn
BACKEND_SOURCES := \
ClBackend.cpp \
+ ClBackendContext.cpp \
ClContextControl.cpp \
ClLayerSupport.cpp \
ClWorkloadFactory.cpp \
//
#pragma once
+#include <backendsCommon/IBackendContext.hpp>
#include <backendsCommon/IBackendInternal.hpp>
namespace armnn
const BackendId& GetId() const override { return GetIdStatic(); }
IWorkloadFactoryPtr CreateWorkloadFactory() const override;
+
+ IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
+ {
+ return IBackendContextPtr{};
+ }
};
} // namespace armnn
\ No newline at end of file
//
#pragma once
+#include <backendsCommon/IBackendContext.hpp>
#include <backendsCommon/IBackendInternal.hpp>
namespace armnn
static const BackendId& GetIdStatic();
const BackendId& GetId() const override { return GetIdStatic(); }
- IWorkloadFactoryPtr CreateWorkloadFactory() const override;
+ IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory() const override;
+
+ IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override
+ {
+ return IBackendContextPtr{};
+ }
};
} // namespace armnn
\ No newline at end of file
InferenceTestImage.cpp)
add_library_ex(inferenceTest STATIC ${inference_test_sources})
target_include_directories(inferenceTest PRIVATE ../src/armnnUtils)
-target_include_directories(inferenceTest PRIVATE ../src)
+target_include_directories(inferenceTest PRIVATE ../src/backends)
if(BUILD_CAFFE_PARSER)
macro(CaffeParserTest testName sources)
add_executable_ex(${testName} ${sources})
target_include_directories(${testName} PRIVATE ../src/armnnUtils)
- target_include_directories(${testName} PRIVATE ../src)
+ target_include_directories(${testName} PRIVATE ../src/backends)
set_target_properties(${testName} PROPERTIES COMPILE_FLAGS "${CAFFE_PARSER_TEST_ADDITIONAL_COMPILE_FLAGS}")
target_link_libraries(${testName} inferenceTest)
macro(TfParserTest testName sources)
add_executable_ex(${testName} ${sources})
target_include_directories(${testName} PRIVATE ../src/armnnUtils)
- target_include_directories(${testName} PRIVATE ../src)
+ target_include_directories(${testName} PRIVATE ../src/backends)
target_link_libraries(${testName} inferenceTest)
target_link_libraries(${testName} armnnTfParser)
macro(TfLiteParserTest testName sources)
add_executable_ex(${testName} ${sources})
target_include_directories(${testName} PRIVATE ../src/armnnUtils)
- target_include_directories(${testName} PRIVATE ../src)
+ target_include_directories(${testName} PRIVATE ../src/backends)
target_link_libraries(${testName} inferenceTest)
target_link_libraries(${testName} armnnTfLiteParser)
macro(OnnxParserTest testName sources)
add_executable_ex(${testName} ${sources})
target_include_directories(${testName} PRIVATE ../src/armnnUtils)
- target_include_directories(${testName} PRIVATE ../src)
+ target_include_directories(${testName} PRIVATE ../src/backends)
target_link_libraries(${testName} inferenceTest)
target_link_libraries(${testName} armnnOnnxParser)
add_executable_ex(ExecuteNetwork ${ExecuteNetwork_sources})
target_include_directories(ExecuteNetwork PRIVATE ../src/armnn)
target_include_directories(ExecuteNetwork PRIVATE ../src/armnnUtils)
- target_include_directories(ExecuteNetwork PRIVATE ../src)
+ target_include_directories(ExecuteNetwork PRIVATE ../src/backends)
if (BUILD_CAFFE_PARSER)
target_link_libraries(ExecuteNetwork armnnCaffeParser)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
-#include <backends/backendsCommon/BackendRegistry.hpp>
+#include <backendsCommon/BackendRegistry.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>