#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include <api/CPP/memory.hpp>
#include <api/CPP/topology.hpp>
#include <api/CPP/reorder.hpp>
#include <api/CPP/input_layout.hpp>
#include <api/CPP/convolution.hpp>
#include <api/CPP/data.hpp>
#include <api/CPP/pooling.hpp>
#include <api/CPP/fully_connected.hpp>
#include <api/CPP/softmax.hpp>
#include <api/CPP/engine.hpp>
#include <api/CPP/network.hpp>
15 using namespace cldnn;
18 const tensor::value_type
21 conv1_out_channels = 20,
22 conv2_out_channels = 50,
27 // Create layout with same sizes but new format.
28 layout create_reordering_layout(format new_format, const layout& src_layout)
30 return { src_layout.data_type, new_format, src_layout.size };
33 // Create MNIST topology
34 topology create_topology(const layout& in_layout, const memory& conv1_weights_mem, const memory& conv1_bias_mem )
36 auto data_type = in_layout.data_type;
38 // Create input_layout description
39 // "input" - is the primitive id inside topology
40 input_layout input("input", in_layout);
42 // Create topology object with 2 primitives
43 cldnn:: topology topology(
44 // 1. input layout primitive.
46 // 2. reorder primitive with id "reorder_input"
47 reorder("reorder_input",
48 // input primitive for reorder (implicitly converted to primitive_id)
50 // output layout for reorder
51 create_reordering_layout(format::yxfb, in_layout))
54 // Create data primitive - its content should be set already.
55 cldnn::data conv1_weights( "conv1_weights", conv1_weights_mem );
57 // Add primitive to topology
58 topology.add(conv1_weights);
60 // Emplace new primitive to topology
61 topology.add<cldnn::data>({ "conv1_bias", conv1_bias_mem });
63 // Emplace 2 primitives
65 // Convolution primitive with id "conv1"
67 "reorder_input", // primitive id of the convolution's input
68 { conv1_weights }, // weights primitive id is taken from the object
69 { "conv1_bias" } // bias primitive id
71 // Pooling id: "pool1"
73 "conv1", // Input: "conv1"
74 pooling_mode::max, // Pooling mode: MAX
75 spatial(2,2), // stride: 2
76 spatial(2,2) // kernel_size: 2
80 // Conv2 weights data is not available now, so just declare its layout
81 layout conv2_weights_layout(data_type, format::bfyx,{ conv2_out_channels, conv1_out_channels, conv_krnl_size, conv_krnl_size });
83 // Define the rest of topology.
85 // Input layout for conv2 weights. Data will passed by network::set_input_data()
86 input_layout("conv2_weights", conv2_weights_layout),
87 // Input layout for conv2 bias.
88 input_layout("conv2_bias", { data_type, format::bfyx, spatial(conv2_out_channels) }),
89 // Second convolution id: "conv2"
91 "pool1", // Input: "pool1"
92 { "conv2_weights" }, // Weights: input_layout "conv2_weights"
93 { "conv2_bias" } // Bias: input_layout "conv2_bias"
95 // Second pooling id: "pool2"
97 "conv2", // Input: "conv2"
98 pooling_mode::max, // Pooling mode: MAX
99 spatial(2, 2), // stride: 2
100 spatial(2, 2) // kernel_size: 2
102 // Fully connected (inner product) primitive id "fc1"
103 fully_connected("fc1",
104 "pool2", // Input: "pool2"
105 "fc1_weights", // "fc1_weights" will be added to the topology later
106 "fc1_bias", // will be defined later
107 true // Use built-in Relu. Slope is set to 0 by default.
109 // Second FC/IP primitive id: "fc2", input: "fc1".
110 // Weights ("fc2_weights") and biases ("fc2_bias") will be defined later.
111 // Built-in Relu is disabled by default.
112 fully_connected("fc2", "fc1", "fc2_weights", "fc2_bias"),
113 // The "softmax" primitive is not an input for any other,
114 // so it will be automatically added to network outputs.
115 softmax("softmax", "fc2")
120 // Copy from a vector to cldnn::memory
121 void copy_to_memory(memory& mem, const vector<float>& src)
123 cldnn::pointer<float> dst(mem);
124 std::copy(src.begin(), src.end(), dst.begin());
128 int recognize_image(network& network, const memory& input_memory)
130 // Set/update network input
131 network.set_input_data("input", input_memory);
133 // Start network execution
134 auto outputs = network.execute();
136 // get_memory() blocks output generation completed
137 auto output = outputs.at("softmax").get_memory();
139 // Get direct access to output memory
140 cldnn::pointer<float> out_ptr(output);
143 auto max_element_pos = max_element(out_ptr.begin(), out_ptr.end());
144 return static_cast<int>(distance(out_ptr.begin(), max_element_pos));
// User-defined helpers which are out of this example scope
// //////////////////////////////////////////////////////////////
// Loads file to a vector of floats.
// Stub: ignores the file name and hands back a one-element dummy buffer.
vector<float> load_data(const string&)
{
    vector<float> dummy(1, 0.0f);
    return dummy;
}
152 // Allocates memory and loads data from file.
153 // Memory layout is taken from file.
154 memory load_mem(const engine& eng, const string&) {
155 //return a dummy value
156 return memory::allocate(eng, layout{ data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
// Load image, resize to [x,y] and store in a vector of floats
// in the order "bfyx".
// Stub: ignores all arguments and returns a one-element dummy buffer.
vector<float> load_image_bfyx(const string&, int, int)
{
    vector<float> pixels(1, 0.0f);
    return pixels;
}
// //////////////////////////////////////////////////////////////
166 // Use data type: float
167 auto data_type = type_to_data_type<float>::value;
169 // Network input layout
171 data_type, // stored data type
172 format::bfyx, // data stored in order batch-channel-Y-X, where X coordinate changes first.
173 {1, input_channels, input_size, input_size} // batch: 1, channels: 1, Y: 28, X: 28
176 // Create memory for conv1 weights
177 layout conv1_weights_layout(data_type, format::bfyx,{ conv1_out_channels, input_channels, conv_krnl_size, conv_krnl_size });
178 vector<float> my_own_buffer = load_data("conv1_weights.bin");
179 // The conv1_weights_mem is attached to my_own_buffer, so my_own_buffer should not be changed or descroyed until network execution completion.
180 auto conv1_weights_mem = memory::attach(conv1_weights_layout, my_own_buffer.data(), my_own_buffer.size());
182 // Create default engine
183 cldnn::engine engine;
185 // Create memory for conv1 bias
186 layout conv1_bias_layout(data_type, format::bfyx, spatial(20));
187 // Memory allocation requires engine
188 auto conv1_bias_mem = memory::allocate(engine, conv1_bias_layout);
189 // The memory is allocated by library, so we do not need to care about buffer lifetime.
190 copy_to_memory(conv1_bias_mem, load_data("conv1_bias.bin"));
193 cldnn::topology topology = create_topology(in_layout, conv1_weights_mem, conv1_bias_mem);
195 // Define network data not defined in create_topology()
197 cldnn::data("fc1_weights", load_mem(engine, "fc1_weights.data")),
198 cldnn::data("fc1_bias", load_mem(engine, "fc1_bias.data")),
199 cldnn::data("fc2_weights", load_mem(engine, "fc2_weights.data")),
200 cldnn::data("fc2_bias", load_mem(engine, "fc2_bias.data"))
203 // Build the network. Allow implicit data optimizations.
204 // The "softmax" primitive is not used as an input for other primitives,
205 // so we do not need to explicitly select it in build_options::outputs()
206 cldnn::network network(engine, topology, { build_option::optimize_data(true) });
208 // Set network data which was not known at topology creation.
209 network.set_input_data("conv2_weights", load_mem(engine, "conv2_weights.data"));
210 network.set_input_data("conv2_bias", load_mem(engine, "conv2_bias.data"));
212 // Allocate memory for input image.
213 auto input_memory = memory::allocate(engine, in_layout);
215 // Run network 2 times with different images.
216 for (auto img_name : { "one.jpg", "two.jpg" })
218 // Reuse image memory.
219 copy_to_memory(input_memory, load_image_bfyx("one.jpg", in_layout.size.spatial[0], in_layout.size.spatial[1]));
220 auto result = recognize_image(network, input_memory);
221 cout << img_name << " recognized as" << result << endl;