ITensorHandleFactory::FactoryId srcFactoryId,
const Layer& layer,
const Layer& connectedLayer,
- TensorHandleFactoryRegistry& registry)
+ TensorHandleFactoryRegistry& registry,
+ bool importEnabled)
{
auto toBackend = backends.find(connectedLayer.GetBackendId());
ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
// Search for export/import options
ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
- if (srcFactory->GetExportFlags() != 0)
+ if (srcFactory->GetExportFlags() != 0 && importEnabled)
{
for (auto&& pref : dstPrefs)
{
OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
BackendsMap& backends,
TensorHandleFactoryRegistry& registry,
+ bool importEnabled,
Optional<std::vector<std::string>&> errMessages)
{
OptimizationResult result;
- optGraph.ForEachLayer([&backends, ®istry, &result, &errMessages](Layer* layer)
+ optGraph.ForEachLayer([&backends, ®istry, &result, &errMessages, importEnabled](Layer* layer)
{
ARMNN_ASSERT(layer);
{
const Layer& connectedLayer = connection->GetOwningLayer();
- EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer, registry);
+ EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
+ registry, importEnabled);
if (strategy == EdgeStrategy::Undefined)
{
OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
backends,
tensorHandleFactoryRegistry,
+ options.m_ImportEnabled,
messages);
if (strategyResult.m_Error)
{
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ OptimizerOptions optOptions;
+ optOptions.m_ImportEnabled = true;
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
Graph& graph = optNetObjPtr->GetGraph();
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ OptimizerOptions optOptions;
+ optOptions.m_ImportEnabled = true;
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
Graph& graph = optNetObjPtr->GetGraph();
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ OptimizerOptions optOptions;
+ optOptions.m_ImportEnabled = true;
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
Graph& graph = optNetObjPtr->GetGraph();
// optimize the network
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ OptimizerOptions optOptions;
+ optOptions.m_ImportEnabled = true;
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
Graph& graph = optNetObjPtr->GetGraph();
BOOST_TEST(outputData == expectedOutput);
}
+// Verifies that when memory import is NOT enabled (the default for Optimize()
+// when no OptimizerOptions are passed), tensors crossing the MockRef/CpuAcc
+// backend boundary are transferred with CopyMemGeneric (a MemCopy layer) and
+// ImportMemGeneric never appears in the profiler output.
+BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+{
+    using namespace armnn;
+
+    // Create a mock backend object
+    MockImportBackendInitialiser initialiser; // Register the Mock Backend
+    auto backendObjPtr = CreateBackendObject(MockImportBackendId());
+    BOOST_TEST((backendObjPtr != nullptr));
+
+    BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
+    if (backendIds.find("MockRef") == backendIds.end())
+    {
+        std::string message = "Cannot load MockRef";
+        BOOST_FAIL(message);
+    }
+
+    // Create runtime in which test will run and allow fallback to CpuAcc.
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network: (input0 - input1) + input2 -> output.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
+    IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
+    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
+    input2->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    sub->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    input1->GetOutputSlot(0).SetTensorInfo(info);
+    input2->GetOutputSlot(0).SetTensorInfo(info);
+    sub->GetOutputSlot(0).SetTensorInfo(info);
+    add->GetOutputSlot(0).SetTensorInfo(info);
+
+    // Optimize the network WITHOUT OptimizerOptions, so m_ImportEnabled keeps
+    // its default value (false) - this is the behaviour under test.
+    std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+    Graph& graph = optNetObjPtr->GetGraph();
+
+    armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
+    armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
+    armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
+    armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "sub");
+    armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ sub (0) -> add (1) ]");
+    armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "add");
+    armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
+
+    // Checks order is valid.
+    BOOST_TEST(CheckOrder(graph, layer0, layer1));
+    BOOST_TEST(CheckOrder(graph, layer1, layer2));
+    BOOST_TEST(CheckOrder(graph, layer2, layer3));
+    BOOST_TEST(CheckOrder(graph, layer3, layer4));
+    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+
+    // Load it into the runtime. It should pass.
+    NetworkId netId;
+    std::string ignoredErrorMessage;
+    INetworkProperties networkProperties(false, false);
+
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
+
+    // Creates structures for input & output
+    std::vector<float> inputData0
+    {
+        1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 0.0f
+    };
+    std::vector<float> inputData1
+    {
+        0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
+    };
+    std::vector<float> inputData2
+    {
+        12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> outputData(12);
+
+    // Expected: (input0 - input1) + input2, element-wise.
+    std::vector<float> expectedOutput
+    {
+        13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
+    };
+
+    InputTensors inputTensors
+    {
+        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
+        { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
+        { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+    };
+    OutputTensors outputTensors
+    {
+        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
+    };
+
+    runtime->GetProfiler(netId)->EnableProfiling(true);
+
+    // Do the inference
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Retrieve the Profiler.Print() output to get the workload execution
+    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+    std::stringstream ss;
+    profilerManager.GetProfiler()->Print(ss);
+    std::string dump = ss.str();
+
+    // Contains CopyMemGeneric between the backends
+    std::size_t found = dump.find("CopyMemGeneric");
+    BOOST_TEST(found != std::string::npos);
+
+    // Does not contain ImportMemGeneric
+    found = dump.find("ImportMemGeneric");
+    BOOST_TEST(found == std::string::npos);
+
+    // Uses a memory copy (MemCopy layer) between the backends, not memory import
+    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+
+    // Check output is as expected
+    BOOST_TEST(outputData == expectedOutput);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()