{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "# Python inference tutorial\n",
    "\n",
    "This tutorial will walk you through the inference process.\n",
    "\n",
    "**Requirements:**\n",
    "\n",
    "* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
    "\n",
    "It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Standalone hardware deployment\n",
    "\n",
    "The standalone flow allows direct access to the HW: applications are developed directly on top of the Hailo\n",
    "core HW using HailoRT. This way the Hailo hardware can be used without TensorFlow, and\n",
    "even without the Hailo SDK (once the HEF is built).\n",
    "\n",
    "An HEF is Hailo's binary format for neural networks. An HEF file contains:\n",
    "\n",
    "* Target HW configuration\n",
    "* Weights\n",
    "* Metadata for HailoRT (e.g. input/output scaling)\n",
    "\n",
    "First, create the desired target object. In this example we use the Hailo-8 PCIe interface:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from multiprocessing import Process\n",
    "from hailo_platform import (HEF, VDevice, HailoStreamInterface, InferVStreams, ConfigureParams,\n",
    "    InputVStreamParams, OutputVStreamParams, InputVStreams, OutputVStreams, FormatType)\n",
    "\n",
    "# The target can be used as a context manager (\"with\" statement) to ensure it's released on time.\n",
    "# Here it's avoided for the sake of simplicity.\n",
    "target = VDevice()\n",
    "\n",
    "# Loading compiled HEFs to device:\n",
    "model_name = 'resnet_v1_18'\n",
    "hef_path = '../hefs/{}.hef'.format(model_name)\n",
    "hef = HEF(hef_path)\n",
    "\n",
    "# Configure network groups\n",
    "configure_params = ConfigureParams.create_from_hef(hef=hef, interface=HailoStreamInterface.PCIe)\n",
    "network_groups = target.configure(hef, configure_params)\n",
    "network_group = network_groups[0]\n",
    "network_group_params = network_group.create_params()\n",
    "\n",
    "# Create input and output virtual streams params\n",
    "input_vstreams_params = InputVStreamParams.make(network_group, format_type=FormatType.FLOAT32)\n",
    "output_vstreams_params = OutputVStreamParams.make(network_group, format_type=FormatType.UINT8)\n",
    "\n",
    "# Define dataset params\n",
    "input_vstream_info = hef.get_input_vstream_infos()[0]\n",
    "output_vstream_info = hef.get_output_vstream_infos()[0]\n",
    "image_height, image_width, channels = input_vstream_info.shape\n",
    "num_of_images = 10\n",
    "low, high = 2, 20\n",
    "\n",
    "# Generate a random dataset\n",
    "dataset = np.random.randint(low, high, (num_of_images, image_height, image_width, channels)).astype(np.float32)"
   ]
  },
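  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As the comment above notes, `VDevice` can also be used as a context manager, so the device is released even if an error occurs. A minimal sketch of that pattern (defined but not called here, since `target` above already holds the device):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: the same setup wrapped in a \"with\" statement.\n",
    "# Not invoked in this notebook -- `target` above already owns the device.\n",
    "def configure_with_context_manager(hef_path):\n",
    "    with VDevice() as vdevice:\n",
    "        hef = HEF(hef_path)\n",
    "        params = ConfigureParams.create_from_hef(hef=hef, interface=HailoStreamInterface.PCIe)\n",
    "        network_group = vdevice.configure(hef, params)[0]\n",
    "        # ... create vstream params and run inference as in the cells below ..."
   ]
  },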
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Running hardware inference\n",
    "Infer the model and then display the output shape:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Infer\n",
    "with InferVStreams(network_group, input_vstreams_params, output_vstreams_params) as infer_pipeline:\n",
    "    input_data = {input_vstream_info.name: dataset}\n",
    "    with network_group.activate(network_group_params):\n",
    "        infer_results = infer_pipeline.infer(input_data)\n",
    "        print('Stream output shape is {}'.format(infer_results[output_vstream_info.name].shape))"
   ]
  },
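  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The returned dictionary maps each output vstream name to a NumPy array. For a classifier such as `resnet_v1_18`, a per-image prediction can be derived from that array. A minimal sketch, assuming the output flattens to one score vector per image:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch: derive a predicted class per image from the raw output buffer.\n",
    "# Assumes the output reshapes to (num_of_images, num_scores).\n",
    "output_data = infer_results[output_vstream_info.name]\n",
    "predictions = np.argmax(output_data.reshape(num_of_images, -1), axis=-1)\n",
    "print('Predicted class per image: {}'.format(predictions))"
   ]
  },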
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Streaming inference\n",
    "\n",
    "This section shows how to run streaming inference using multiple processes in Python.\n",
    "\n",
    "Note: this flow is not supported on Windows.\n",
    "\n",
    "Instead of calling `infer`, we will use a send/receive model:\n",
    "the send function and the receive function will run in separate processes."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define the send and receive functions:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def send(configured_network, num_frames):\n",
    "    # Block until the network group is activated\n",
    "    configured_network.wait_for_activation(1000)\n",
    "    vstreams_params = InputVStreamParams.make(configured_network)\n",
    "    with InputVStreams(configured_network, vstreams_params) as vstreams:\n",
    "        # Allocate one single-frame buffer per input vstream\n",
    "        vstream_to_buffer = {vstream: np.ndarray([1] + list(vstream.shape), dtype=vstream.dtype) for vstream in vstreams}\n",
    "        for _ in range(num_frames):\n",
    "            for vstream, buff in vstream_to_buffer.items():\n",
    "                vstream.send(buff)\n",
    "\n",
    "def recv(configured_network, vstreams_params, num_frames):\n",
    "    configured_network.wait_for_activation(1000)\n",
    "    with OutputVStreams(configured_network, vstreams_params) as vstreams:\n",
    "        for _ in range(num_frames):\n",
    "            for vstream in vstreams:\n",
    "                data = vstream.recv()\n",
    "\n",
    "def recv_all(configured_network, num_frames):\n",
    "    # Spawn one receiver process per group of output vstreams\n",
    "    vstreams_params_groups = OutputVStreamParams.make_groups(configured_network)\n",
    "    recv_procs = []\n",
    "    for vstreams_params in vstreams_params_groups:\n",
    "        proc = Process(target=recv, args=(configured_network, vstreams_params, num_frames))\n",
    "        proc.start()\n",
    "        recv_procs.append(proc)\n",
    "    for proc in recv_procs:\n",
    "        proc.join()"
   ]
  },
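  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The `send` function above streams uninitialized buffers, which is enough to exercise the data path. In a real application each buffer would hold an actual frame. A hypothetical variant (`get_next_frame` is a placeholder helper, not part of HailoRT):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch: stream real frames instead of uninitialized buffers.\n",
    "def get_next_frame(shape, dtype):\n",
    "    # Placeholder frame source; replace with a camera or file reader.\n",
    "    return np.random.randint(0, 255, [1] + list(shape)).astype(dtype)\n",
    "\n",
    "def send_frames(configured_network, num_frames):\n",
    "    configured_network.wait_for_activation(1000)\n",
    "    vstreams_params = InputVStreamParams.make(configured_network)\n",
    "    with InputVStreams(configured_network, vstreams_params) as vstreams:\n",
    "        for _ in range(num_frames):\n",
    "            for vstream in vstreams:\n",
    "                vstream.send(get_next_frame(vstream.shape, vstream.dtype))"
   ]
  },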
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define the number of frames to stream, create the processes, activate the network group and run the processes:\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the number of frames to stream\n",
    "num_of_frames = 1000\n",
    "\n",
    "send_process = Process(target=send, args=(network_group, num_of_frames))\n",
    "recv_process = Process(target=recv_all, args=(network_group, num_of_frames))\n",
    "recv_process.start()\n",
    "send_process.start()\n",
    "print('Starting streaming (hef=\\'{}\\', num_of_frames={})'.format(model_name, num_of_frames))\n",
    "with network_group.activate(network_group_params):\n",
    "    send_process.join()\n",
    "    recv_process.join()\n",
    "print('Done')\n",
    "\n",
    "# Release the device now that streaming is finished\n",
    "target.release()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}