Publishing 2019 R1 content
[platform/upstream/dldt.git] inference-engine/thirdparty/clDNN/src/include/engine_impl.h
index ea1234a..3f81b76 100644
@@ -22,7 +22,6 @@
 #include "refcounted_obj.h"
 #include "implementation_map.h"
 #include "memory_pool.h"
-
 #include "gpu/engine_info.h"
 
 #include <memory>
@@ -41,6 +40,7 @@ struct event_impl;
 struct topology_impl;
 struct program_impl;
 struct network_impl;
+struct program_node;
 
 template <class>
 struct typed_program_node;
@@ -49,9 +49,8 @@ struct engine_impl : public refcounted_obj<engine_impl>
 {
 public:
     engine_impl(const engine_configuration& conf);
-
+    ~engine_impl();
     engine_types type() const { return engine_types::ocl; }
-
     refcounted_obj_ptr<memory_impl> allocate_memory(layout layout);
     refcounted_obj_ptr<memory_impl> allocate_memory(layout layout, primitive_id, uint32_t, std::set<primitive_id>, bool reusable = true);
     refcounted_obj_ptr<memory_impl> reinterpret_buffer(const memory_impl& memory, layout new_layout);
@@ -60,11 +59,13 @@ public:
     refcounted_obj_ptr<event_impl> create_user_event(bool set = false);
     void wait_for_events(std::vector<event_impl::ptr> const& events);
 
-    refcounted_obj_ptr<program_impl> build_program(const topology_impl& topology, const build_options& options, bool is_internal = false);
+    refcounted_obj_ptr<program_impl> build_program(const topology_impl& topology, const build_options& options, bool is_internal = false, bool no_optimizations = false);
+    refcounted_obj_ptr<program_impl> build_program(const std::set<std::shared_ptr<program_node>>& nodes, const build_options& options, bool is_internal);
     void compile_program(program_impl& prog);
 
-    refcounted_obj_ptr<network_impl> allocate_network(const program_impl& program);
-    refcounted_obj_ptr<network_impl> build_network(const topology_impl& topology, const build_options& options, bool internal_network = false);
+    refcounted_obj_ptr<network_impl> allocate_network(const program_impl& program, bool is_internal = false);
+    refcounted_obj_ptr<network_impl> build_network(const topology_impl& topology, const build_options& options, bool is_internal = false);
+    refcounted_obj_ptr<network_impl> build_network(const std::set<std::shared_ptr<program_node>>& nodes, const build_options& options, bool is_internal);
     void flush_network();
     void release_pending_memory();
 
@@ -77,7 +78,23 @@ public:
         auto factory = implementation_map<T>::get(type(), node);
         return std::move(std::unique_ptr<primitive_impl>(factory(node)));
     }
-    
+
+    template <class T>
+    bool does_an_implementation_exist(typed_program_node<T> const& node)
+    {
+        if (&node.get_program().get_engine() != this)
+            throw std::invalid_argument("engine_impl::does_an_implementation_exist: program's engine does not match called engine");
+        return implementation_map<T>::check(type(), node);
+    }
+
+    template <class T>
+    bool does_possible_implementation_exist(typed_program_node<T> const& node)
+    {
+        if (&node.get_program().get_engine() != this)
+            throw std::invalid_argument("engine_impl::does_possible_implementation_exist: program's engine does not match called engine");
+        return implementation_map<T>::check_io_eq(type(), node);
+    }
+
     const engine_configuration& configuration() const { return _configuration; }
     void set_mem_pool(bool flag) { _configuration.enable_memory_pool = flag; }
     std::shared_ptr<gpu_toolkit> get_context() const { return _context; }
@@ -97,4 +114,4 @@ private:
 };
 }
 
-API_CAST(::cldnn_engine, cldnn::engine_impl)
+API_CAST(::cldnn_engine, cldnn::engine_impl)
\ No newline at end of file
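
Note on the new interface: besides the node-set overloads of build_program/build_network, this commit adds two implementation-query helpers, does_an_implementation_exist and does_possible_implementation_exist. The sketch below is purely illustrative and is not part of the commit: the helper can_lower_node and its usage are hypothetical, it only relies on the engine_impl declarations shown in this diff, and the difference between the two queries is inferred from the underlying calls (implementation_map<T>::check vs. check_io_eq).

// Hypothetical clDNN graph-pass helper, sketched against the engine_impl API
// added above (illustrative only; can_lower_node does not exist upstream).
// Assumes the concrete typed_program_node<T> definition is also visible.
#include "engine_impl.h"

namespace cldnn
{
template <class T>
bool can_lower_node(engine_impl& engine, typed_program_node<T> const& node)
{
    // First ask whether an implementation is registered for the node as-is
    // (implementation_map<T>::check under the hood) ...
    if (engine.does_an_implementation_exist(node))
        return true;

    // ... then fall back to the looser query, which judging by its name
    // (implementation_map<T>::check_io_eq) matches on input/output layouts
    // only, e.g. when a pass could still insert reorders around the node.
    return engine.does_possible_implementation_exist(node);
}
}  // namespace cldnn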