ADD_DEFINITIONS("-DPREFIX=\"${CMAKE_INSTALL_PREFIX}\"")
ADD_DEFINITIONS("-DTIZEN_DEBUG")
+IF (ARMNN_GRAPH_DEBUG)
+ MESSAGE("GRAPH DEBUG MODE enabled.")
+ ADD_DEFINITIONS(-DGRAPH_DEBUG)
+ENDIF()
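+# Note: ARMNN_GRAPH_DEBUG above is assumed to be passed in at configure time,
+# e.g. cmake -DARMNN_GRAPH_DEBUG=ON <source-dir>; it is not defined here.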
+
SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}")
aux_source_directory(src SOURCES)
#include <queue>
#include <armnn/ArmNN.hpp>
+#include <armnn/backends/ITensorHandle.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>
namespace InferenceEngineImpl
return ret;
}
+ static void GraphDebugCallback(armnn::LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
+ {
+ std::cout << "input tensor shape : " << tensor->GetShape() << ", ";
+ std::cout << "layerGuid : " << guid << ", ";
+ std::cout << "outputSlot : " << slotIndex << std::endl;
+
+ // TODO.
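+ // Possible extension (a sketch only, assuming the debug tensor is a
+ // CPU-mappable float tensor): its first few elements could be dumped too.
+ //   const float *data = static_cast<const float *>(tensor->Map());
+ //   for (unsigned int i = 0; i < std::min(4u, tensor->GetShape().GetNumElements()); ++i)
+ //       std::cout << data[i] << " ";
+ //   tensor->Unmap();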
+ }
+
int InferenceARMNN::Load(std::vector<std::string> model_paths,
inference_model_format_e model_format)
{
// In default, add CpuRef as fallback.
mAccelType.push_back(armnn::Compute::CpuRef);
+ bool graph_debug = false;
+
+#ifdef GRAPH_DEBUG
+ graph_debug = true;
+#endif
+ // The first parameter is reduceFp32ToFp16 and the second one enables debug mode.
+ // reduceFp32ToFp16 is kept at its default value, false.
+ armnn::OptimizerOptions optimizerOptions(false, graph_debug);
+
// Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
- *mNetwork, mAccelType, sRuntime->GetDeviceSpec());
+ *mNetwork, mAccelType, sRuntime->GetDeviceSpec(), optimizerOptions);
if (!optimizedNet) {
LOGE("Fail to optimize network.");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
LOGI("Loaded the Network.");
+#ifdef GRAPH_DEBUG
+ // Add a debug layer to every layer of the given model, and call the
+ // user-given callback, GraphDebugCallback, for each layer during inference.
+ // Note: to use the default callback, pass NULL instead of a user-given one.
+ sRuntime->RegisterDebugCallback(mNetworkIdentifier, GraphDebugCallback);
+#endif
+
LOGI("LEAVE");
return ret;