Publishing 2019 R3.1 content
[platform/upstream/dldt.git] / inference-engine / include / cldnn / cldnn_config.hpp
diff --git a/inference-engine/include/cldnn/cldnn_config.hpp b/inference-engine/include/cldnn/cldnn_config.hpp
index 3fac6a2..64ded2d 100644
--- a/inference-engine/include/cldnn/cldnn_config.hpp
+++ b/inference-engine/include/cldnn/cldnn_config.hpp
@@ -1,5 +1,4 @@
-// Copyright (C) 2018 Intel Corporation
-//
+// Copyright (C) 2018-2019 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
@@ -12,10 +11,13 @@
 #pragma once
 
 #include <string>
-#include "../ie_plugin_config.hpp"
+#include "ie_plugin_config.hpp"
 
 namespace InferenceEngine {
 
+/**
+ * @brief GPU plugin configuration
+ */
 namespace CLDNNConfigParams {
 
 /**
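For context, here is a sketch of how these declarations conventionally expand. The exact macro bodies are not shown in this diff and are an assumption based on the DECLARE_CONFIG_KEY machinery in ie_plugin_config.hpp:

// Assumed expansion (not part of this diff): DECLARE_CLDNN_CONFIG_KEY(name)
// forwards to DECLARE_CONFIG_KEY(CLDNN_##name), which defines a constant
// holding the key's string form. For example,
//   DECLARE_CLDNN_CONFIG_KEY(SOURCES_DUMPS_DIR);
// becomes, roughly:
static constexpr auto KEY_CLDNN_SOURCES_DUMPS_DIR = "CLDNN_SOURCES_DUMPS_DIR";
// Client code then retrieves that string through the CLDNN_CONFIG_KEY(name)
// accessor macro rather than spelling the literal out.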
@@ -57,5 +59,10 @@ DECLARE_CLDNN_CONFIG_KEY(GRAPH_DUMPS_DIR);
 */
 DECLARE_CLDNN_CONFIG_KEY(SOURCES_DUMPS_DIR);
 
+/**
+* @brief This key turns usage of int8 optimizations and quantized models on.
+*/
+DECLARE_CLDNN_CONFIG_KEY(INT8_ENABLED);
+
 } // namespace CLDNNConfigParams
 } // namespace InferenceEngine
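Below is a minimal usage sketch, not part of the commit, showing how the newly declared INT8_ENABLED key could be passed to the GPU (clDNN) plugin. It assumes the Inference Engine Core API of this release, the conventional CLDNN_CONFIG_KEY(name) accessor, and the PluginConfigParams::YES / PluginConfigParams::NO values from ie_plugin_config.hpp:

#include <map>
#include <string>

#include <inference_engine.hpp>
#include <cldnn/cldnn_config.hpp>

int main() {
    InferenceEngine::Core core;

    // Turn on int8 optimizations for quantized models in the clDNN (GPU)
    // plugin. CLDNN_CONFIG_KEY(INT8_ENABLED) resolves to the string key
    // declared by DECLARE_CLDNN_CONFIG_KEY(INT8_ENABLED) in this diff.
    std::map<std::string, std::string> config = {
        {CLDNN_CONFIG_KEY(INT8_ENABLED), InferenceEngine::PluginConfigParams::YES}
    };
    core.SetConfig(config, "GPU");

    return 0;
}

The key takes effect for networks loaded after SetConfig is called; passing PluginConfigParams::NO would leave int8 paths disabled.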