2 * I/O functions for libusb
3 * Copyright (C) 2007-2008 Daniel Drake <dsd@gentoo.org>
4 * Copyright (c) 2001 Johannes Erdfelt <johannes@erdfelt.com>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
/* this is a list of in-flight transfer handles, sorted by timeout expiration.
 * URBs that will time out soonest are placed at the beginning of the list,
 * URBs that will time out later are placed after them, and URBs with an
 * infinite timeout are always placed at the very end. */
39 static struct list_head flying_transfers;
40 static pthread_mutex_t flying_transfers_lock = PTHREAD_MUTEX_INITIALIZER;
42 /* list of poll fd's */
43 static struct list_head pollfds;
44 static pthread_mutex_t pollfds_lock = PTHREAD_MUTEX_INITIALIZER;
46 /* user callbacks for pollfd changes */
47 static libusb_pollfd_added_cb fd_added_cb = NULL;
48 static libusb_pollfd_removed_cb fd_removed_cb = NULL;
50 /* this lock ensures that only one thread is handling events at any one time */
51 static pthread_mutex_t events_lock = PTHREAD_MUTEX_INITIALIZER;
53 /* used to see if there is an active thread doing event handling */
54 static int event_handler_active = 0;
/* used to wait for event completion in threads other than the one that is
 * performing event handling */
58 static pthread_mutex_t event_waiters_lock = PTHREAD_MUTEX_INITIALIZER;
59 static pthread_cond_t event_waiters_cond = PTHREAD_COND_INITIALIZER;
62 * \page io Synchronous and asynchronous device I/O
64 * \section intro Introduction
 * If you're using libusb in your application, you probably want to perform
 * I/O with devices: USB data transfers.
69 * libusb offers two separate interfaces for device I/O. This page aims to
70 * introduce the two in order to help you decide which one is more suitable
71 * for your application. You can also choose to use both interfaces in your
72 * application by considering each transfer on a case-by-case basis.
74 * Once you have read through the following discussion, you should consult the
75 * detailed API documentation pages for the details:
79 * \section theory Transfers at a logical level
81 * At a logical level, USB transfers typically happen in two parts. For
 * example, when reading data from an endpoint:
83 * -# A request for data is sent to the device
84 * -# Some time later, the incoming data is received by the host
86 * or when writing data to an endpoint:
88 * -# The data is sent to the device
89 * -# Some time later, the host receives acknowledgement from the device that
90 * the data has been transferred.
92 * There may be an indefinite delay between the two steps. Consider a
93 * fictional USB input device with a button that the user can press. In order
94 * to determine when the button is pressed, you would likely submit a request
95 * to read data on a bulk or interrupt endpoint and wait for data to arrive.
96 * Data will arrive when the button is pressed by the user, which is
97 * potentially hours later.
99 * libusb offers both a synchronous and an asynchronous interface to performing
100 * USB transfers. The main difference is that the synchronous interface
101 * combines both steps indicated above into a single function call, whereas
102 * the asynchronous interface separates them.
104 * \section sync The synchronous interface
106 * The synchronous I/O interface allows you to perform a USB transfer with
107 * a single function call. When the function call returns, the transfer has
108 * completed and you can parse the results.
 * If you have used libusb-0.1 before, this I/O style will seem familiar to
 * you: libusb-0.1 only offered a synchronous interface.
113 * In our input device example, to read button presses you might write code
114 * in the following style:
unsigned char data[4];
int actual_length;
int r = libusb_bulk_transfer(handle, EP_IN, data, sizeof(data), &actual_length, 0);
if (r == 0 && actual_length == sizeof(data)) {
	// results of the transaction can now be found in the data buffer
	// parse them here and report button press
} else {
	// error handling
}
127 * The main advantage of this model is simplicity: you did everything with
128 * a single simple function call.
130 * However, this interface has its limitations. Your application will sleep
131 * inside libusb_bulk_transfer() until the transaction has completed. If it
132 * takes the user 3 hours to press the button, your application will be
133 * sleeping for that long. Execution will be tied up inside the library -
134 * the entire thread will be useless for that duration.
 * Another issue is that by tying up the thread with that single transaction
 * there is no possibility of performing I/O with multiple endpoints and/or
 * multiple devices simultaneously, unless you resort to creating one thread
 * per transaction.
141 * Additionally, there is no opportunity to cancel the transfer after the
142 * request has been submitted.
144 * For details on how to use the synchronous API, see the
145 * \ref syncio "synchronous I/O API documentation" pages.
147 * \section async The asynchronous interface
149 * Asynchronous I/O is the most significant new feature in libusb-1.0.
150 * Although it is a more complex interface, it solves all the issues detailed
 * Instead of providing functions that block until the I/O has completed,
154 * libusb's asynchronous interface presents non-blocking functions which
155 * begin a transfer and then return immediately. Your application passes a
156 * callback function pointer to this non-blocking function, which libusb will
157 * call with the results of the transaction when it has completed.
159 * Transfers which have been submitted through the non-blocking functions
160 * can be cancelled with a separate function call.
162 * The non-blocking nature of this interface allows you to be simultaneously
 * performing I/O to multiple endpoints on multiple devices, without having
 * to use threads.
166 * This added flexibility does come with some complications though:
167 * - In the interest of being a lightweight library, libusb does not create
168 * threads and can only operate when your application is calling into it. Your
 * application must call into libusb from its main loop when events are ready
170 * to be handled, or you must use some other scheme to allow libusb to
171 * undertake whatever work needs to be done.
172 * - libusb also needs to be called into at certain fixed points in time in
173 * order to accurately handle transfer timeouts.
174 * - Memory handling becomes more complex. You cannot use stack memory unless
175 * the function with that stack is guaranteed not to return until the transfer
176 * callback has finished executing.
177 * - You generally lose some linearity from your code flow because submitting
178 * the transfer request is done in a separate function from where the transfer
179 * results are handled. This becomes particularly obvious when you want to
180 * submit a second transfer based on the results of an earlier transfer.
182 * Internally, libusb's synchronous interface is expressed in terms of function
183 * calls to the asynchronous interface.
185 * For details on how to use the asynchronous API, see the
186 * \ref asyncio "asynchronous I/O API" documentation pages.
191 * \page packetoverflow Packets and overflows
193 * \section packets Packet abstraction
195 * The USB specifications describe how data is transmitted in packets, with
196 * constraints on packet size defined by endpoint descriptors. The host must
197 * not send data payloads larger than the endpoint's maximum packet size.
199 * libusb and the underlying OS abstract out the packet concept, allowing you
200 * to request transfers of any size. Internally, the request will be divided
201 * up into correctly-sized packets. You do not have to be concerned with
202 * packet sizes, but there is one exception when considering overflows.
204 * \section overflow Bulk/interrupt transfer overflows
206 * When requesting data on a bulk endpoint, libusb requires you to supply a
207 * buffer and the maximum number of bytes of data that libusb can put in that
208 * buffer. However, the size of the buffer is not communicated to the device -
209 * the device is just asked to send any amount of data.
211 * There is no problem if the device sends an amount of data that is less than
212 * or equal to the buffer size. libusb reports this condition to you through
213 * the \ref libusb_transfer::actual_length "libusb_transfer.actual_length"
216 * Problems may occur if the device attempts to send more data than can fit in
217 * the buffer. libusb reports LIBUSB_TRANSFER_OVERFLOW for this condition but
218 * other behaviour is largely undefined: actual_length may or may not be
219 * accurate, the chunk of data that can fit in the buffer (before overflow)
220 * may or may not have been transferred.
222 * Overflows are nasty, but can be avoided. Even though you were told to
223 * ignore packets above, think about the lower level details: each transfer is
224 * split into packets (typically small, with a maximum size of 512 bytes).
225 * Overflows can only happen if the final packet in an incoming data transfer
226 * is smaller than the actual packet that the device wants to transfer.
227 * Therefore, you will never see an overflow if your transfer buffer size is a
228 * multiple of the endpoint's packet size: the final packet will either
229 * fill up completely or will be only partially filled.
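 * As an illustrative sketch (dev, EP_IN and desired_size are hypothetical
 * names, and this assumes libusb_get_max_packet_size() succeeds):
// round the receive buffer up to a whole number of max-size packets so that
// an overflow cannot occur
int mps = libusb_get_max_packet_size(dev, EP_IN);
int buffer_size = ((desired_size + mps - 1) / mps) * mps;
unsigned char *buffer = malloc(buffer_size);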
233 * @defgroup asyncio Asynchronous device I/O
235 * This page details libusb's asynchronous (non-blocking) API for USB device
236 * I/O. This interface is very powerful but is also quite complex - you will
237 * need to read this page carefully to understand the necessary considerations
238 * and issues surrounding use of this interface. Simplistic applications
239 * may wish to consider the \ref syncio "synchronous I/O API" instead.
241 * The asynchronous interface is built around the idea of separating transfer
242 * submission and handling of transfer completion (the synchronous model
243 * combines both of these into one). There may be a long delay between
244 * submission and completion, however the asynchronous submission function
 * is non-blocking, so it will return control to your application during that
246 * potentially long delay.
248 * \section asyncabstraction Transfer abstraction
250 * For the asynchronous I/O, libusb implements the concept of a generic
251 * transfer entity for all types of I/O (control, bulk, interrupt,
252 * isochronous). The generic transfer object must be treated slightly
253 * differently depending on which type of I/O you are performing with it.
255 * This is represented by the public libusb_transfer structure type.
257 * \section asynctrf Asynchronous transfers
 * We can view asynchronous I/O as a 5 step process:
 * -# Allocation
 * -# Filling
 * -# Submission
 * -# Completion handling
 * -# Deallocation
266 * \subsection asyncalloc Allocation
268 * This step involves allocating memory for a USB transfer. This is the
269 * generic transfer object mentioned above. At this stage, the transfer
270 * is "blank" with no details about what type of I/O it will be used for.
272 * Allocation is done with the libusb_alloc_transfer() function. You must use
273 * this function rather than allocating your own transfers.
275 * \subsection asyncfill Filling
277 * This step is where you take a previously allocated transfer and fill it
278 * with information to determine the message type and direction, data buffer,
279 * callback function, etc.
281 * You can either fill the required fields yourself or you can use the
282 * helper functions: libusb_fill_control_transfer(), libusb_fill_bulk_transfer()
283 * and libusb_fill_interrupt_transfer().
285 * \subsection asyncsubmit Submission
287 * When you have allocated a transfer and filled it, you can submit it using
288 * libusb_submit_transfer(). This function returns immediately but can be
289 * regarded as firing off the I/O request in the background.
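 * Putting the first three steps together, a minimal sketch for a bulk IN
 * transfer might look like this (dev_handle, EP_IN and my_callback are names
 * assumed for illustration; error handling is reduced to a single check):
static void my_callback(struct libusb_transfer *transfer)
{
	// step 4 happens here: examine transfer->status and transfer->actual_length
}

struct libusb_transfer *transfer = libusb_alloc_transfer(0); // allocation
unsigned char *buf = malloc(64);                             // heap, not stack
libusb_fill_bulk_transfer(transfer, dev_handle, EP_IN, buf, 64,
	my_callback, NULL, 5000);                                // filling
if (libusb_submit_transfer(transfer) < 0) {                  // submission
	libusb_free_transfer(transfer);
	free(buf);
}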
291 * \subsection asynccomplete Completion handling
293 * After a transfer has been submitted, one of four things can happen to it:
295 * - The transfer completes (i.e. some data was transferred)
 * - The transfer has a timeout and the timeout expires before all data is
 *   transferred
298 * - The transfer fails due to an error
299 * - The transfer is cancelled
301 * Each of these will cause the user-specified transfer callback function to
302 * be invoked. It is up to the callback function to determine which of the
303 * above actually happened and to act accordingly.
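 * Continuing the sketch from the submission step, my_callback might
 * distinguish the four outcomes by examining the status field (the handling
 * shown is only illustrative):
static void my_callback(struct libusb_transfer *transfer)
{
	switch (transfer->status) {
	case LIBUSB_TRANSFER_COMPLETED:
		// transfer->actual_length bytes are now in transfer->buffer
		break;
	case LIBUSB_TRANSFER_TIMED_OUT:
		// the timeout expired before all data was transferred
		break;
	case LIBUSB_TRANSFER_CANCELLED:
		// the transfer was cancelled via libusb_cancel_transfer()
		break;
	default:
		// an error occurred (e.g. LIBUSB_TRANSFER_ERROR)
		break;
	}
}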
305 * \subsection Deallocation
307 * When a transfer has completed (i.e. the callback function has been invoked),
308 * you are advised to free the transfer (unless you wish to resubmit it, see
309 * below). Transfers are deallocated with libusb_free_transfer().
311 * It is undefined behaviour to free a transfer which has not completed.
313 * \section asyncresubmit Resubmission
 * You may be wondering why allocation, filling, and submission are all
 * separated above when they could reasonably be combined into a single
 * operation.
319 * The reason for separation is to allow you to resubmit transfers without
320 * having to allocate new ones every time. This is especially useful for
321 * common situations dealing with interrupt endpoints - you allocate one
322 * transfer, fill and submit it, and when it returns with results you just
323 * resubmit it for the next interrupt.
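 * For example, a callback for an interrupt endpoint might simply resubmit the
 * same transfer after processing it (a sketch; parse_report() is hypothetical
 * and the return value of libusb_submit_transfer() should really be checked):
static void irq_callback(struct libusb_transfer *transfer)
{
	if (transfer->status == LIBUSB_TRANSFER_COMPLETED)
		parse_report(transfer->buffer, transfer->actual_length);

	// no need to allocate or fill again: submit the same transfer object
	libusb_submit_transfer(transfer);
}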
325 * \section asynccancel Cancellation
327 * Another advantage of using the asynchronous interface is that you have
328 * the ability to cancel transfers which have not yet completed. This is
329 * done by calling the libusb_cancel_transfer() function.
331 * libusb_cancel_transfer() is asynchronous/non-blocking in itself. When the
332 * cancellation actually completes, the transfer's callback function will
333 * be invoked, and the callback function should check the transfer status to
334 * determine that it was cancelled.
336 * Freeing the transfer after it has been cancelled but before cancellation
337 * has completed will result in undefined behaviour.
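 * As a sketch, cancellation might look like this (remember that the transfer
 * may only be freed once the callback has reported the cancelled status):
libusb_cancel_transfer(transfer);   // returns immediately

// ...later, in the transfer callback...
if (transfer->status == LIBUSB_TRANSFER_CANCELLED)
	libusb_free_transfer(transfer); // now it is safe to free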
339 * \section bulk_overflows Overflows on device-to-host bulk/interrupt endpoints
341 * If your device does not have predictable transfer sizes (or it misbehaves),
342 * your application may submit a request for data on an IN endpoint which is
343 * smaller than the data that the device wishes to send. In some circumstances
344 * this will cause an overflow, which is a nasty condition to deal with. See
345 * the \ref packetoverflow page for discussion.
347 * \section asyncctrl Considerations for control transfers
349 * The <tt>libusb_transfer</tt> structure is generic and hence does not
350 * include specific fields for the control-specific setup packet structure.
352 * In order to perform a control transfer, you must place the 8-byte setup
353 * packet at the start of the data buffer. To simplify this, you could
354 * cast the buffer pointer to type struct libusb_control_setup, or you can
355 * use the helper function libusb_fill_control_setup().
357 * The wLength field placed in the setup packet must be the length you would
358 * expect to be sent in the setup packet: the length of the payload that
359 * follows (or the expected maximum number of bytes to receive). However,
360 * the length field of the libusb_transfer object must be the length of
361 * the data buffer - i.e. it should be wLength <em>plus</em> the size of
362 * the setup packet (LIBUSB_CONTROL_SETUP_SIZE).
364 * If you use the helper functions, this is simplified for you:
365 * -# Allocate a buffer of size LIBUSB_CONTROL_SETUP_SIZE plus the size of the
366 * data you are sending/requesting.
367 * -# Call libusb_fill_control_setup() on the data buffer, using the transfer
368 * request size as the wLength value (i.e. do not include the extra space you
369 * allocated for the control setup).
370 * -# If this is a host-to-device transfer, place the data to be transferred
371 * in the data buffer, starting at offset LIBUSB_CONTROL_SETUP_SIZE.
372 * -# Call libusb_fill_control_transfer() to associate the data buffer with
373 * the transfer (and to set the remaining details such as callback and timeout).
 * - Note that there is no parameter to set the length field of the transfer:
 *   the length is automatically inferred from the wLength field of the setup
 *   packet.
377 * -# Submit the transfer.
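 * As a sketch, a device-to-host control transfer requesting 4 bytes might be
 * set up as follows (dev_handle, the bRequest value 0x01 and ctrl_cb are
 * hypothetical, and the return values should really be checked):
unsigned char *buf = malloc(LIBUSB_CONTROL_SETUP_SIZE + 4);
struct libusb_transfer *transfer = libusb_alloc_transfer(0);

// wLength is just the data stage length (4), not the buffer size
libusb_fill_control_setup(buf,
	LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR, 0x01, 0, 0, 4);
libusb_fill_control_transfer(transfer, dev_handle, buf, ctrl_cb, NULL, 1000);
libusb_submit_transfer(transfer);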
379 * The multi-byte control setup fields (wValue, wIndex and wLength) must
380 * be given in little-endian byte order (the endianness of the USB bus).
381 * Endianness conversion is transparently handled by
 * libusb_fill_control_setup(), which is documented to accept host-endian
 * values.
385 * Further considerations are needed when handling transfer completion in
386 * your callback function:
387 * - As you might expect, the setup packet will still be sitting at the start
388 * of the data buffer.
389 * - If this was a device-to-host transfer, the received data will be sitting
390 * at offset LIBUSB_CONTROL_SETUP_SIZE into the buffer.
391 * - The actual_length field of the transfer structure is relative to the
392 * wLength of the setup packet, rather than the size of the data buffer. So,
 * if your wLength was 4 and your transfer's <tt>length</tt> was 12, you
 * should expect an <tt>actual_length</tt> of 4 to indicate that the data was
 * transferred in its entirety.
397 * To simplify parsing of setup packets and obtaining the data from the
398 * correct offset, you may wish to use the libusb_control_transfer_get_data()
 * and libusb_control_transfer_get_setup() functions within your transfer
 * callback.
402 * Even though control endpoints do not halt, a completed control transfer
403 * may have a LIBUSB_TRANSFER_STALL status code. This indicates the control
404 * request was not supported.
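 * For example, a control transfer callback might use both helpers like this
 * sketch (parse_response() is hypothetical):
static void ctrl_cb(struct libusb_transfer *transfer)
{
	struct libusb_control_setup *setup = libusb_control_transfer_get_setup(transfer);
	unsigned char *data = libusb_control_transfer_get_data(transfer);

	if (transfer->status == LIBUSB_TRANSFER_COMPLETED) {
		// data points just past the setup packet; actual_length is relative
		// to setup->wLength, not to the size of the whole buffer
		parse_response(data, transfer->actual_length);
	} else if (transfer->status == LIBUSB_TRANSFER_STALL) {
		// the control request was not supported by the device
	}
}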
406 * \section asyncintr Considerations for interrupt transfers
408 * All interrupt transfers are performed using the polling interval presented
409 * by the bInterval value of the endpoint descriptor.
411 * \section asynciso Considerations for isochronous transfers
413 * Isochronous transfers are more complicated than transfers to
414 * non-isochronous endpoints.
416 * To perform I/O to an isochronous endpoint, allocate the transfer by calling
417 * libusb_alloc_transfer() with an appropriate number of isochronous packets.
419 * During filling, set \ref libusb_transfer::type "type" to
420 * \ref libusb_transfer_type::LIBUSB_TRANSFER_TYPE_ISOCHRONOUS
421 * "LIBUSB_TRANSFER_TYPE_ISOCHRONOUS", and set
422 * \ref libusb_transfer::num_iso_packets "num_iso_packets" to a value less than
423 * or equal to the number of packets you requested during allocation.
424 * libusb_alloc_transfer() does not set either of these fields for you, given
425 * that you might not even use the transfer on an isochronous endpoint.
427 * Next, populate the length field for the first num_iso_packets entries in
428 * the \ref libusb_transfer::iso_packet_desc "iso_packet_desc" array. Section
 * 5.6.3 of the USB 2.0 specification describes how the maximum isochronous
 * packet length is determined by the wMaxPacketSize field in the endpoint
431 * descriptor. Two functions can help you here:
433 * - libusb_get_max_packet_size() is an easy way to determine the max
434 * packet size for an endpoint.
435 * - libusb_set_iso_packet_lengths() assigns the same length to all packets
436 * within a transfer, which is usually what you want.
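 * Putting this together, a sketch of setting up an isochronous IN transfer of
 * 8 packets, filling the fields directly as described above (dev_handle,
 * EP_ISO_IN and iso_cb are hypothetical, and error checking is omitted):
int mps = libusb_get_max_packet_size(dev, EP_ISO_IN);
struct libusb_transfer *transfer = libusb_alloc_transfer(8);

transfer->dev_handle = dev_handle;
transfer->endpoint = EP_ISO_IN;
transfer->type = LIBUSB_TRANSFER_TYPE_ISOCHRONOUS;
transfer->num_iso_packets = 8;
transfer->buffer = malloc(8 * mps);   // room for 8 full packets
transfer->length = 8 * mps;
transfer->callback = iso_cb;
transfer->timeout = 1000;
libusb_set_iso_packet_lengths(transfer, mps);
libusb_submit_transfer(transfer);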
438 * For outgoing transfers, you'll obviously fill the buffer and populate the
 * packet descriptors in the hope that all the data gets transferred. For incoming
440 * transfers, you must ensure the buffer has sufficient capacity for
441 * the situation where all packets transfer the full amount of requested data.
443 * Completion handling requires some extra consideration. The
444 * \ref libusb_transfer::actual_length "actual_length" field of the transfer
445 * is meaningless and should not be examined; instead you must refer to the
446 * \ref libusb_iso_packet_descriptor::actual_length "actual_length" field of
447 * each individual packet.
 * The \ref libusb_transfer::status "status" field of the transfer is also a
 * little different for isochronous transfers:
451 * - If the packets were submitted and the isochronous data microframes
452 * completed normally, status will have value
453 * \ref libusb_transfer_status::LIBUSB_TRANSFER_COMPLETED
454 * "LIBUSB_TRANSFER_COMPLETED". Note that bus errors and software-incurred
455 * delays are not counted as transfer errors; the transfer.status field may
456 * indicate COMPLETED even if some or all of the packets failed. Refer to
457 * the \ref libusb_iso_packet_descriptor::status "status" field of each
458 * individual packet to determine packet failures.
459 * - The status field will have value
460 * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR
461 * "LIBUSB_TRANSFER_ERROR" only when serious errors were encountered.
462 * - Other transfer status codes occur with normal behaviour.
464 * The data for each packet will be found at an offset into the buffer that
465 * can be calculated as if each prior packet completed in full. The
466 * libusb_get_iso_packet_buffer() and libusb_get_iso_packet_buffer_simple()
467 * functions may help you here.
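 * For example, completion handling for an isochronous IN transfer might walk
 * the packet descriptors like this (a sketch; handle_data() is hypothetical):
static void iso_cb(struct libusb_transfer *transfer)
{
	int i;
	for (i = 0; i < transfer->num_iso_packets; i++) {
		struct libusb_iso_packet_descriptor *desc = &transfer->iso_packet_desc[i];
		if (desc->status != LIBUSB_TRANSFER_COMPLETED)
			continue; // this packet failed; others may still carry data
		handle_data(libusb_get_iso_packet_buffer_simple(transfer, i),
			desc->actual_length);
	}
}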
469 * \section asyncmem Memory caveats
471 * In most circumstances, it is not safe to use stack memory for transfer
472 * buffers. This is because the function that fired off the asynchronous
473 * transfer may return before libusb has finished using the buffer, and when
 * the function returns, its stack gets destroyed. This is true for both
475 * host-to-device and device-to-host transfers.
477 * The only case in which it is safe to use stack memory is where you can
478 * guarantee that the function owning the stack space for the buffer does not
479 * return until after the transfer's callback function has completed. In every
480 * other case, you need to use heap memory instead.
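 * For example, a sketch of the safe pattern: allocate the buffer on the heap
 * and let libusb release it when the transfer is freed, using the
 * LIBUSB_TRANSFER_FREE_BUFFER flag described in the next section (dev_handle,
 * EP_IN and cb are hypothetical names):
unsigned char *buf = malloc(64);           // heap allocation: stays valid
struct libusb_transfer *transfer = libusb_alloc_transfer(0);
libusb_fill_bulk_transfer(transfer, dev_handle, EP_IN, buf, 64, cb, NULL, 0);
transfer->flags |= LIBUSB_TRANSFER_FREE_BUFFER; // free(buf) happens inside
                                                // libusb_free_transfer()
libusb_submit_transfer(transfer);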
482 * \section asyncflags Fine control
484 * Through using this asynchronous interface, you may find yourself repeating
485 * a few simple operations many times. You can apply a bitwise OR of certain
486 * flags to a transfer to simplify certain things:
487 * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_SHORT_NOT_OK
488 * "LIBUSB_TRANSFER_SHORT_NOT_OK" results in transfers which transferred
489 * less than the requested amount of data being marked with status
490 * \ref libusb_transfer_status::LIBUSB_TRANSFER_ERROR "LIBUSB_TRANSFER_ERROR"
491 * (they would normally be regarded as COMPLETED)
492 * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER
493 * "LIBUSB_TRANSFER_FREE_BUFFER" allows you to ask libusb to free the transfer
494 * buffer when freeing the transfer.
495 * - \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_TRANSFER
496 * "LIBUSB_TRANSFER_FREE_TRANSFER" causes libusb to automatically free the
497 * transfer after the transfer callback returns.
499 * \section asyncevent Event handling
 * In accordance with its aim of being a lightweight library, libusb does not
502 * create threads internally. This means that libusb code does not execute
503 * at any time other than when your application is calling a libusb function.
504 * However, an asynchronous model requires that libusb perform work at various
505 * points in time - namely processing the results of previously-submitted
506 * transfers and invoking the user-supplied callback function.
508 * This gives rise to the libusb_handle_events() function which your
 * application must call when libusb has work to do. This gives libusb
510 * the opportunity to reap pending transfers, invoke callbacks, etc.
512 * The first issue to discuss here is how your application can figure out
513 * when libusb has work to do. In fact, there are two naive options which
514 * do not actually require your application to know this:
515 * -# Periodically call libusb_handle_events() in non-blocking mode at fixed
516 * short intervals from your main loop
 * -# Repeatedly call libusb_handle_events() in blocking mode from a dedicated
 * thread.
520 * The first option is plainly not very nice, and will cause unnecessary
521 * CPU wakeups leading to increased power usage and decreased battery life.
522 * The second option is not very nice either, but may be the nicest option
 * available to you if the "proper" approach cannot be applied to your
524 * application (read on...).
526 * The recommended option is to integrate libusb with your application main
527 * event loop. libusb exposes a set of file descriptors which allow you to do
528 * this. Your main loop is probably already calling poll() or select() or a
529 * variant on a set of file descriptors for other event sources (e.g. keyboard
530 * button presses, mouse movements, network sockets, etc). You then add
531 * libusb's file descriptors to your poll()/select() calls, and when activity
532 * is detected on such descriptors you know it is time to call
533 * libusb_handle_events().
535 * There is one final event handling complication. libusb supports
536 * asynchronous transfers which time out after a specified time period, and
537 * this requires that libusb is called into at or after the timeout so that
538 * the timeout can be handled. So, in addition to considering libusb's file
539 * descriptors in your main event loop, you must also consider that libusb
540 * sometimes needs to be called into at fixed points in time even when there
541 * is no file descriptor activity.
543 * For the details on retrieving the set of file descriptors and determining
544 * the next timeout, see the \ref poll "polling and timing" API documentation.
548 * @defgroup poll Polling and timing
550 * This page documents libusb's functions for polling events and timing.
551 * These functions are only necessary for users of the
552 * \ref asyncio "asynchronous API". If you are only using the simpler
 * \ref syncio "synchronous API" then you never need to call these
 * functions.
556 * The justification for the functionality described here has already been
557 * discussed in the \ref asyncevent "event handling" section of the
558 * asynchronous API documentation. In summary, libusb does not create internal
559 * threads for event processing and hence relies on your application calling
560 * into libusb at certain points in time so that pending events can be handled.
561 * In order to know precisely when libusb needs to be called into, libusb
562 * offers you a set of pollable file descriptors and information about when
563 * the next timeout expires.
565 * If you are using the asynchronous I/O API, you must take one of the two
566 * following options, otherwise your I/O will not complete.
568 * \section pollsimple The simple option
570 * If your application revolves solely around libusb and does not need to
571 * handle other event sources, you can have a program structure as follows:
574 // find and open device
575 // maybe fire off some initial async I/O
577 while (user_has_not_requested_exit)
578 libusb_handle_events();
583 * With such a simple main loop, you do not have to worry about managing
584 * sets of file descriptors or handling timeouts. libusb_handle_events() will
585 * handle those details internally.
587 * \section pollmain The more advanced option
589 * In more advanced applications, you will already have a main loop which
590 * is monitoring other event sources: network sockets, X11 events, mouse
591 * movements, etc. Through exposing a set of file descriptors, libusb is
592 * designed to cleanly integrate into such main loops.
594 * In addition to polling file descriptors for the other event sources, you
595 * take a set of file descriptors from libusb and monitor those too. When you
596 * detect activity on libusb's file descriptors, you call
597 * libusb_handle_events_timeout() in non-blocking mode.
599 * You must also consider the fact that libusb sometimes has to handle events
600 * at certain known times which do not generate activity on file descriptors.
 * Your main loop must also consider these times, modify its poll()/select()
602 * timeout accordingly, and track time so that libusb_handle_events_timeout()
603 * is called in non-blocking mode when timeouts expire.
605 * In pseudo-code, you want something that looks like:
while (user has not requested application exit) {
	libusb_get_next_timeout();
	select(on libusb file descriptors plus any other event sources of interest,
		using a timeout no larger than the value libusb just suggested)
	if (select() indicated activity on libusb file descriptors)
		libusb_handle_events_timeout(0);
	if (time has elapsed to or beyond the libusb timeout)
		libusb_handle_events_timeout(0);
}
623 * The set of file descriptors that libusb uses as event sources may change
624 * during the life of your application. Rather than having to repeatedly
625 * call libusb_get_pollfds(), you can set up notification functions for when
626 * the file descriptor set changes using libusb_set_pollfd_notifiers().
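 * For illustration, a sketch of using the notifiers (the callback prototypes
 * shown are assumptions based on the libusb_pollfd_added_cb and
 * libusb_pollfd_removed_cb typedefs; check libusb.h for the authoritative
 * signatures):
static void fd_added(int fd, short events)
{
	// add fd to the set your main loop passes to poll()/select()
}

static void fd_removed(int fd)
{
	// remove fd from that set
}

// register once; libusb then keeps your fd set in sync as descriptors change
libusb_set_pollfd_notifiers(fd_added, fd_removed);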
628 * \section mtissues Multi-threaded considerations
630 * Unfortunately, the situation is complicated further when multiple threads
631 * come into play. If two threads are monitoring the same file descriptors,
 * the fact that only one thread will be woken up when an event occurs causes
 * problems.
635 * The events lock, event waiters lock, and libusb_handle_events_locked()
636 * entities are added to solve these problems. You do not need to be concerned
637 * with these entities otherwise.
639 * See the extra documentation: \ref mtasync
642 /** \page mtasync Multi-threaded applications and asynchronous I/O
644 * libusb is a thread-safe library, but extra considerations must be applied
645 * to applications which interact with libusb from multiple threads.
647 * The underlying issue that must be addressed is that all libusb I/O
648 * revolves around monitoring file descriptors through the poll()/select()
649 * system calls. This is directly exposed at the
650 * \ref asyncio "asynchronous interface" but it is important to note that the
651 * \ref syncio "synchronous interface" is implemented on top of the
 * asynchronous interface, therefore the same considerations apply.
654 * The issue is that if two or more threads are concurrently calling poll()
655 * or select() on libusb's file descriptors then only one of those threads
656 * will be woken up when an event arrives. The others will be completely
657 * oblivious that anything has happened.
659 * Consider the following pseudo-code, which submits an asynchronous transfer
660 * then waits for its completion. This style is one way you could implement a
661 * synchronous interface on top of the asynchronous interface (and libusb
662 * does something similar, albeit more advanced due to the complications
663 * explained on this page).
void cb(struct libusb_transfer *transfer)
{
	int *completed = transfer->user_data;
	*completed = 1;
}

void myfunc() {
	const struct timeval timeout = { 120, 0 };
	struct libusb_transfer *transfer;
	unsigned char buffer[LIBUSB_CONTROL_SETUP_SIZE];
	int completed = 0;

	transfer = libusb_alloc_transfer(0);
	libusb_fill_control_setup(buffer,
		LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_OUT, 0x04, 0x01, 0, 0);
	libusb_fill_control_transfer(transfer, dev, buffer, cb, &completed, 1000);
	libusb_submit_transfer(transfer);

	while (!completed) {
		poll(libusb file descriptors, 120*1000);
		if (poll indicates activity)
			libusb_handle_events_timeout(0);
	}
	printf("completed!");
}
694 * Here we are <em>serializing</em> completion of an asynchronous event
695 * against a condition - the condition being completion of a specific transfer.
696 * The poll() loop has a long timeout to minimize CPU usage during situations
697 * when nothing is happening (it could reasonably be unlimited).
699 * If this is the only thread that is polling libusb's file descriptors, there
700 * is no problem: there is no danger that another thread will swallow up the
701 * event that we are interested in. On the other hand, if there is another
702 * thread polling the same descriptors, there is a chance that it will receive
703 * the event that we were interested in. In this situation, <tt>myfunc()</tt>
704 * will only realise that the transfer has completed on the next iteration of
705 * the loop, <em>up to 120 seconds later.</em> Clearly a two-minute delay is
 * undesirable, and don't even think about using short timeouts to circumvent
 * this issue.
709 * The solution here is to ensure that no two threads are ever polling the
710 * file descriptors at the same time. A naive implementation of this would
711 * impact the capabilities of the library, so libusb offers the scheme
712 * documented below to ensure no loss of functionality.
714 * Before we go any further, it is worth mentioning that all libusb-wrapped
715 * event handling procedures fully adhere to the scheme documented below.
716 * This includes libusb_handle_events() and all the synchronous I/O functions -
717 * libusb hides this headache from you. You do not need to worry about any
718 * of these issues if you stick to that level.
720 * The problem is when we consider the fact that libusb exposes file
 * descriptors to allow you to integrate asynchronous USB I/O into
722 * existing main loops, effectively allowing you to do some work behind
723 * libusb's back. If you do take libusb's file descriptors and pass them to
724 * poll()/select() yourself, you need to be aware of the associated issues.
726 * \section eventlock The events lock
728 * The first concept to be introduced is the events lock. The events lock
729 * is used to serialize threads that want to handle events, such that only
730 * one thread is handling events at any one time.
732 * You must take the events lock before polling libusb file descriptors,
733 * using libusb_lock_events(). You must release the lock as soon as you have
734 * aborted your poll()/select() loop, using libusb_unlock_events().
736 * \section threadwait Letting other threads do the work for you
738 * Although the events lock is a critical part of the solution, it is not
 * enough on its own. You might wonder if the following is sufficient...
libusb_lock_events();

while (!completed) {
	poll(libusb file descriptors, 120*1000);
	if (poll indicates activity)
		libusb_handle_events_timeout(0);
}
libusb_unlock_events();
749 * ...and the answer is that it is not. This is because the transfer in the
750 * code shown above may take a long time (say 30 seconds) to complete, and
751 * the lock is not released until the transfer is completed.
753 * Another thread with similar code that wants to do event handling may be
754 * working with a transfer that completes after a few milliseconds. Despite
 * having such a quick completion time, the other thread cannot check the
756 * status of its transfer until the code above has finished (30 seconds later)
757 * due to contention on the lock.
759 * To solve this, libusb offers you a mechanism to determine when another
760 * thread is handling events. It also offers a mechanism to block your thread
761 * until the event handling thread has completed an event (and this mechanism
762 * does not involve polling of file descriptors).
764 * After determining that another thread is currently handling events, you
765 * obtain the <em>event waiters</em> lock using libusb_lock_event_waiters().
766 * You then re-check that some other thread is still handling events, and if
767 * so, you call libusb_wait_for_event().
769 * libusb_wait_for_event() puts your application to sleep until an event
770 * occurs, or until a thread releases the events lock. When either of these
771 * things happen, your thread is woken up, and should re-check the condition
772 * it was waiting on. It should also re-check that another thread is handling
773 * events, and if not, it should start handling events itself.
775 * This looks like the following, as pseudo-code:
retry:
if (libusb_try_lock_events() == 0) {
	// we obtained the event lock: do our own event handling
	while (!completed) {
		poll(libusb file descriptors, 120*1000);
		if (poll indicates activity)
			libusb_handle_events_locked(0);
	}
	libusb_unlock_events();
} else {
	// another thread is doing event handling. wait for it to signal us that
	// an event has completed
	libusb_lock_event_waiters();

	while (!completed) {
		// now that we have the event waiters lock, double check that another
		// thread is still handling events for us. (it may have ceased handling
		// events in the time it took us to reach this point)
		if (!libusb_event_handler_active()) {
			// whoever was handling events is no longer doing so, try again
			libusb_unlock_event_waiters();
			goto retry;
		}

		libusb_wait_for_event();
	}
	libusb_unlock_event_waiters();
}
printf("completed!\n");
809 * We have now implemented code which can dynamically handle situations where
810 * nobody is handling events (so we should do it ourselves), and it can also
811 * handle situations where another thread is doing event handling (so we can
812 * piggyback onto them). It is also equipped to handle a combination of
813 * the two, for example, another thread is doing event handling, but for
814 * whatever reason it stops doing so before our condition is met, so we take
815 * over the event handling.
817 * Three functions were introduced in the above pseudo-code. Their importance
818 * should be apparent from the code shown above.
819 * -# libusb_try_lock_events() is a non-blocking function which attempts
820 * to acquire the events lock but returns a failure code if it is contended.
821 * -# libusb_handle_events_locked() is a variant of
822 * libusb_handle_events_timeout() that you can call while holding the
823 * events lock. libusb_handle_events_timeout() itself implements similar
824 * logic to the above, so be sure not to call it when you are
825 * "working behind libusb's back", as is the case here.
826 * -# libusb_event_handler_active() determines if someone is currently
827 * holding the events lock
829 * You might be wondering why there is no function to wake up all threads
830 * blocked on libusb_wait_for_event(). This is because libusb can do this
831 * internally: it will wake up all such threads when someone calls
832 * libusb_unlock_events() or when a transfer completes (at the point after its
833 * callback has returned).
835 * \subsection concl Closing remarks
837 * The above may seem a little complicated, but hopefully I have made it clear
838 * why such complications are necessary. Also, do not forget that this only
839 * applies to applications that take libusb's file descriptors and integrate
840 * them into their own polling loops.
842 * You may decide that it is OK for your multi-threaded application to ignore
843 * some of the rules and locks detailed above, because you don't think that
844 * two threads can ever be polling the descriptors at the same time. If that
845 * is the case, then that's good news for you because you don't have to worry.
846 * But be careful here; remember that the synchronous I/O functions do event
847 * handling internally. If you have one thread doing event handling in a loop
848 * (without implementing the rules and locking semantics documented above)
849 * and another trying to send a synchronous USB transfer, you will end up with
850 * two threads monitoring the same descriptors, and the above-described
 * undesirable behaviour occurring. The solution is for your polling thread to
852 * play by the rules; the synchronous I/O functions do so, and this will result
853 * in them getting along in perfect harmony.
855 * If you do have a dedicated thread doing event handling, it is perfectly
856 * legal for it to take the event handling lock and never release it. Any
857 * synchronous I/O functions you call from other threads will transparently
858 * fall back to the "event waiters" mechanism detailed above.
/* initialise the I/O handling state declared above */
void usbi_io_init(void)
{
	list_init(&flying_transfers);
	list_init(&pollfds);
	fd_added_cb = NULL;
	fd_removed_cb = NULL;
}
static int calculate_timeout(struct usbi_transfer *transfer)
{
	int r;
	struct timespec current_time;
	unsigned int timeout =
		__USBI_TRANSFER_TO_LIBUSB_TRANSFER(transfer)->timeout;

	if (!timeout)
		return 0;

	r = clock_gettime(CLOCK_MONOTONIC, &current_time);
	if (r < 0) {
		usbi_err("failed to read monotonic clock, errno=%d", errno);
		return r;
	}

	current_time.tv_sec += timeout / 1000;
	current_time.tv_nsec += (timeout % 1000) * 1000000;
	if (current_time.tv_nsec > 1000000000) {
		current_time.tv_nsec -= 1000000000;
		current_time.tv_sec++;
	}

	TIMESPEC_TO_TIMEVAL(&transfer->timeout, &current_time);
	return 0;
}
897 static void add_to_flying_list(struct usbi_transfer *transfer)
899 struct usbi_transfer *cur;
900 struct timeval *timeout = &transfer->timeout;
902 pthread_mutex_lock(&flying_transfers_lock);
904 /* if we have no other flying transfers, start the list with this one */
	if (list_empty(&flying_transfers)) {
		list_add(&transfer->list, &flying_transfers);
		goto out;
	}
910 /* if we have infinite timeout, append to end of list */
	if (!timerisset(timeout)) {
		list_add_tail(&transfer->list, &flying_transfers);
		goto out;
	}
916 /* otherwise, find appropriate place in list */
917 list_for_each_entry(cur, &flying_transfers, list) {
918 /* find first timeout that occurs after the transfer in question */
919 struct timeval *cur_tv = &cur->timeout;
		if (!timerisset(cur_tv) || (cur_tv->tv_sec > timeout->tv_sec) ||
				(cur_tv->tv_sec == timeout->tv_sec &&
					cur_tv->tv_usec > timeout->tv_usec)) {
			list_add_tail(&transfer->list, &cur->list);
			goto out;
		}
	}
929 /* otherwise we need to be inserted at the end */
	list_add_tail(&transfer->list, &flying_transfers);
out:
	pthread_mutex_unlock(&flying_transfers_lock);
}
936 * Allocate a libusb transfer with a specified number of isochronous packet
937 * descriptors. The returned transfer is pre-initialized for you. When the new
938 * transfer is no longer needed, it should be freed with
939 * libusb_free_transfer().
941 * Transfers intended for non-isochronous endpoints (e.g. control, bulk,
942 * interrupt) should specify an iso_packets count of zero.
944 * For transfers intended for isochronous endpoints, specify an appropriate
945 * number of packet descriptors to be allocated as part of the transfer.
946 * The returned transfer is not specially initialized for isochronous I/O;
947 * you are still required to set the
948 * \ref libusb_transfer::num_iso_packets "num_iso_packets" and
949 * \ref libusb_transfer::type "type" fields accordingly.
951 * It is safe to allocate a transfer with some isochronous packets and then
952 * use it on a non-isochronous endpoint. If you do this, ensure that at time
953 * of submission, num_iso_packets is 0 and that type is set appropriately.
955 * \param iso_packets number of isochronous packet descriptors to allocate
956 * \returns a newly allocated transfer, or NULL on error
958 API_EXPORTED struct libusb_transfer *libusb_alloc_transfer(int iso_packets)
960 size_t os_alloc_size = usbi_backend->transfer_priv_size
961 + (usbi_backend->add_iso_packet_size * iso_packets);
	int alloc_size = sizeof(struct usbi_transfer)
		+ sizeof(struct libusb_transfer)
		+ (sizeof(struct libusb_iso_packet_descriptor) * iso_packets)
		+ os_alloc_size;
	struct usbi_transfer *itransfer = malloc(alloc_size);
	if (!itransfer)
		return NULL;

	memset(itransfer, 0, alloc_size);
971 itransfer->num_iso_packets = iso_packets;
972 return __USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer);
976 * Free a transfer structure. This should be called for all transfers
977 * allocated with libusb_alloc_transfer().
979 * If the \ref libusb_transfer_flags::LIBUSB_TRANSFER_FREE_BUFFER
980 * "LIBUSB_TRANSFER_FREE_BUFFER" flag is set and the transfer buffer is
981 * non-NULL, this function will also free the transfer buffer using the
982 * standard system memory allocator (e.g. free()).
984 * It is legal to call this function with a NULL transfer. In this case,
985 * the function will simply return safely.
987 * \param transfer the transfer to free
989 API_EXPORTED void libusb_free_transfer(struct libusb_transfer *transfer)
	struct usbi_transfer *itransfer;
	if (!transfer)
		return;
	if (transfer->flags & LIBUSB_TRANSFER_FREE_BUFFER && transfer->buffer)
		free(transfer->buffer);
	itransfer = __LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer);
	free(itransfer);
}
1002 /** \ingroup asyncio
1003 * Submit a transfer. This function will fire off the USB transfer and then
1004 * return immediately.
1006 * It is undefined behaviour to submit a transfer that has already been
1007 * submitted but has not yet completed.
1009 * \param transfer the transfer to submit
1010 * \returns 0 on success
1011 * \returns LIBUSB_ERROR_NO_DEVICE if the device has been disconnected
1012 * \returns another LIBUSB_ERROR code on other failure
1014 API_EXPORTED int libusb_submit_transfer(struct libusb_transfer *transfer)
1016 struct usbi_transfer *itransfer =
		__LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer);
	int r;

	itransfer->transferred = 0;
	r = calculate_timeout(itransfer);
	if (r < 0)
		return LIBUSB_ERROR_OTHER;

	add_to_flying_list(itransfer);
	r = usbi_backend->submit_transfer(itransfer);
	if (r) {
		pthread_mutex_lock(&flying_transfers_lock);
		list_del(&itransfer->list);
		pthread_mutex_unlock(&flying_transfers_lock);
	}

	return r;
}
1036 /** \ingroup asyncio
1037 * Asynchronously cancel a previously submitted transfer.
1038 * It is undefined behaviour to call this function on a transfer that is
1039 * already being cancelled or has already completed.
1040 * This function returns immediately, but this does not indicate cancellation
1041 * is complete. Your callback function will be invoked at some later time
1042 * with a transfer status of
1043 * \ref libusb_transfer_status::LIBUSB_TRANSFER_CANCELLED
1044 * "LIBUSB_TRANSFER_CANCELLED."
1046 * \param transfer the transfer to cancel
1047 * \returns 0 on success
1048 * \returns a LIBUSB_ERROR code on failure
1050 API_EXPORTED int libusb_cancel_transfer(struct libusb_transfer *transfer)
1052 struct usbi_transfer *itransfer =
		__LIBUSB_TRANSFER_TO_USBI_TRANSFER(transfer);
	int r;

	r = usbi_backend->cancel_transfer(itransfer);
	if (r < 0)
		usbi_err("cancel transfer failed error %d", r);
	return r;
}
1063 /* Handle completion of a transfer (completion might be an error condition).
1064 * This will invoke the user-supplied callback function, which may end up
1065 * freeing the transfer. Therefore you cannot use the transfer structure
1066 * after calling this function, and you should free all backend-specific
1067 * data before calling it. */
1068 void usbi_handle_transfer_completion(struct usbi_transfer *itransfer,
1069 enum libusb_transfer_status status)
1071 struct libusb_transfer *transfer =
		__USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer);
	int flags;
1075 pthread_mutex_lock(&flying_transfers_lock);
1076 list_del(&itransfer->list);
1077 pthread_mutex_unlock(&flying_transfers_lock);
1079 if (status == LIBUSB_TRANSFER_COMPLETED
1080 && transfer->flags & LIBUSB_TRANSFER_SHORT_NOT_OK) {
1081 int rqlen = transfer->length;
1082 if (transfer->type == LIBUSB_TRANSFER_TYPE_CONTROL)
1083 rqlen -= LIBUSB_CONTROL_SETUP_SIZE;
1084 if (rqlen != itransfer->transferred) {
1085 usbi_dbg("interpreting short transfer as error");
			status = LIBUSB_TRANSFER_ERROR;
		}
	}
1090 flags = transfer->flags;
1091 transfer->status = status;
1092 transfer->actual_length = itransfer->transferred;
1093 if (transfer->callback)
1094 transfer->callback(transfer);
	/* transfer might have been freed by the above call, do not use from
	 * this point. */
1097 if (flags & LIBUSB_TRANSFER_FREE_TRANSFER)
1098 libusb_free_transfer(transfer);
1099 pthread_mutex_lock(&event_waiters_lock);
1100 pthread_cond_broadcast(&event_waiters_cond);
1101 pthread_mutex_unlock(&event_waiters_lock);
1104 /* Similar to usbi_handle_transfer_completion() but exclusively for transfers
1105 * that were asynchronously cancelled. The same concerns w.r.t. freeing of
1106 * transfers exist here.
1108 void usbi_handle_transfer_cancellation(struct usbi_transfer *transfer)
	/* if the URB was cancelled due to timeout, report timeout to the user */
	if (transfer->flags & USBI_TRANSFER_TIMED_OUT) {
		usbi_dbg("detected timeout cancellation");
		usbi_handle_transfer_completion(transfer, LIBUSB_TRANSFER_TIMED_OUT);
		return;
	}

	/* otherwise it's a normal async cancel */
	usbi_handle_transfer_completion(transfer, LIBUSB_TRANSFER_CANCELLED);
}
1122 * Attempt to acquire the event handling lock. This lock is used to ensure that
1123 * only one thread is monitoring libusb event sources at any one time.
1125 * You only need to use this lock if you are developing an application
1126 * which calls poll() or select() on libusb's file descriptors directly.
1127 * If you stick to libusb's event handling loop functions (e.g.
 * libusb_handle_events()) then you do not need to be concerned with this
 * locking.
1131 * While holding this lock, you are trusted to actually be handling events.
1132 * If you are no longer handling events, you must call libusb_unlock_events()
1133 * as soon as possible.
1135 * \returns 0 if the lock was obtained successfully
1136 * \returns 1 if the lock was not obtained (i.e. another thread holds the lock)
1139 API_EXPORTED int libusb_try_lock_events(void)
	int r = pthread_mutex_trylock(&events_lock);
	if (r)
		return 1;

	event_handler_active = 1;
	return 0;
}
1150 * Acquire the event handling lock, blocking until successful acquisition if
1151 * it is contended. This lock is used to ensure that only one thread is
1152 * monitoring libusb event sources at any one time.
1154 * You only need to use this lock if you are developing an application
1155 * which calls poll() or select() on libusb's file descriptors directly.
1156 * If you stick to libusb's event handling loop functions (e.g.
 * libusb_handle_events()) then you do not need to be concerned with this
 * locking.
1160 * While holding this lock, you are trusted to actually be handling events.
1161 * If you are no longer handling events, you must call libusb_unlock_events()
1162 * as soon as possible.
1166 API_EXPORTED void libusb_lock_events(void)
1168 pthread_mutex_lock(&events_lock);
1169 event_handler_active = 1;
1173 * Release the lock previously acquired with libusb_try_lock_events() or
1174 * libusb_lock_events(). Releasing this lock will wake up any threads blocked
1175 * on libusb_wait_for_event().
1179 API_EXPORTED void libusb_unlock_events(void)
1181 event_handler_active = 0;
1182 pthread_mutex_unlock(&events_lock);
1184 pthread_mutex_lock(&event_waiters_lock);
1185 pthread_cond_broadcast(&event_waiters_cond);
1186 pthread_mutex_unlock(&event_waiters_lock);
1190 * Determine if an active thread is handling events (i.e. if anyone is holding
1191 * the event handling lock).
1193 * \returns 1 if a thread is handling events
1194 * \returns 0 if there are no threads currently handling events
1197 API_EXPORTED int libusb_event_handler_active(void)
1199 return event_handler_active;
1203 * Acquire the event waiters lock. This lock is designed to be obtained under
1204 * the situation where you want to be aware when events are completed, but
 * some other thread is event handling so calling libusb_handle_events() is not
 * allowed.
1208 * You then obtain this lock, re-check that another thread is still handling
1209 * events, then call libusb_wait_for_event().
1211 * You only need to use this lock if you are developing an application
1212 * which calls poll() or select() on libusb's file descriptors directly,
 * <b>and</b> may potentially be handling events from 2 threads simultaneously.
1214 * If you stick to libusb's event handling loop functions (e.g.
 * libusb_handle_events()) then you do not need to be concerned with this
 * locking.
1220 API_EXPORTED void libusb_lock_event_waiters(void)
1222 pthread_mutex_lock(&event_waiters_lock);
1226 * Release the event waiters lock.
1229 API_EXPORTED void libusb_unlock_event_waiters(void)
1231 pthread_mutex_unlock(&event_waiters_lock);
1235 * Wait for another thread to signal completion of an event. Must be called
1236 * with the event waiters lock held, see libusb_lock_event_waiters().
1238 * This function will block until any of the following conditions are met:
1239 * -# The timeout expires
1240 * -# A transfer completes
1241 * -# A thread releases the event handling lock through libusb_unlock_events()
1243 * Condition 1 is obvious. Condition 2 unblocks your thread <em>after</em>
1244 * the callback for the transfer has completed. Condition 3 is important
1245 * because it means that the thread that was previously handling events is no
1246 * longer doing so, so if any events are to complete, another thread needs to
1247 * step up and start event handling.
1249 * This function releases the event waiters lock before putting your thread
1250 * to sleep, and reacquires the lock as it is being woken up.
1252 * \param tv maximum timeout for this blocking function. A NULL value
1253 * indicates unlimited timeout.
1254 * \returns 0 after a transfer completes or another thread stops event handling
1255 * \returns 1 if the timeout expired
API_EXPORTED int libusb_wait_for_event(struct timeval *tv)
{
	struct timespec timeout;
	int r;

	if (tv == NULL) {
		pthread_cond_wait(&event_waiters_cond, &event_waiters_lock);
		return 0;
	}

	r = clock_gettime(CLOCK_REALTIME, &timeout);
	if (r < 0) {
		usbi_err("failed to read realtime clock, error %d", errno);
		return LIBUSB_ERROR_OTHER;
	}

	timeout.tv_sec += tv->tv_sec;
	timeout.tv_nsec += tv->tv_usec * 1000;
	if (timeout.tv_nsec > 1000000000) {
		timeout.tv_nsec -= 1000000000;
		timeout.tv_sec++;
	}

	r = pthread_cond_timedwait(&event_waiters_cond, &event_waiters_lock,
		&timeout);
	return (r == ETIMEDOUT);
}
static void handle_timeout(struct usbi_transfer *itransfer)
{
	struct libusb_transfer *transfer =
		__USBI_TRANSFER_TO_LIBUSB_TRANSFER(itransfer);
	int r;

	itransfer->flags |= USBI_TRANSFER_TIMED_OUT;
	r = libusb_cancel_transfer(transfer);
	if (r < 0)
		usbi_warn("async cancel failed %d errno=%d", r, errno);
}
1298 static int handle_timeouts(void)
1300 struct timespec systime_ts;
1301 struct timeval systime;
1302 struct usbi_transfer *transfer;
1305 pthread_mutex_lock(&flying_transfers_lock);
	if (list_empty(&flying_transfers))
		goto out;
1309 /* get current time */
1310 r = clock_gettime(CLOCK_MONOTONIC, &systime_ts);
1314 TIMESPEC_TO_TIMEVAL(&systime, &systime_ts);
1316 /* iterate through flying transfers list, finding all transfers that
1317 * have expired timeouts */
1318 list_for_each_entry(transfer, &flying_transfers, list) {
1319 struct timeval *cur_tv = &transfer->timeout;
1321 /* if we've reached transfers of infinite timeout, we're all done */
		if (!timerisset(cur_tv))
			break;
1325 /* ignore timeouts we've already handled */
		if (transfer->flags & USBI_TRANSFER_TIMED_OUT)
			continue;
1329 /* if transfer has non-expired timeout, nothing more to do */
		if ((cur_tv->tv_sec > systime.tv_sec) ||
				(cur_tv->tv_sec == systime.tv_sec &&
					cur_tv->tv_usec > systime.tv_usec))
			break;

		/* otherwise, we've got an expired timeout to handle */
		handle_timeout(transfer);
	}
out:
	pthread_mutex_unlock(&flying_transfers_lock);
	return 0;
}
1344 /* do the actual event handling. assumes that no other thread is concurrently
1345 * doing the same thing. */
1346 static int handle_events(struct timeval *tv)
1349 struct usbi_pollfd *ipollfd;
1355 pthread_mutex_lock(&pollfds_lock);
1356 list_for_each_entry(ipollfd, &pollfds, list)
1359 /* TODO: malloc when number of fd's changes, not on every poll */
	fds = malloc(sizeof(*fds) * nfds);
	if (!fds)
		return LIBUSB_ERROR_NO_MEM;
1364 list_for_each_entry(ipollfd, &pollfds, list) {
1365 struct libusb_pollfd *pollfd = &ipollfd->pollfd;
1366 int fd = pollfd->fd;
1369 fds[i].events = pollfd->events;
1372 pthread_mutex_unlock(&pollfds_lock);
1374 timeout_ms = (tv->tv_sec * 1000) + (tv->tv_usec / 1000);
1376 /* round up to next millisecond */
	if (tv->tv_usec % 1000)
		timeout_ms++;
1380 usbi_dbg("poll() %d fds with timeout in %dms", nfds, timeout_ms);
1381 r = poll(fds, nfds, timeout_ms);
1382 usbi_dbg("poll() returned %d", r);
1385 return handle_timeouts();
1386 } else if (r == -1 && errno == EINTR) {
1388 return LIBUSB_ERROR_INTERRUPTED;
1391 usbi_err("poll failed %d err=%d\n", r, errno);
1392 return LIBUSB_ERROR_IO;
	r = usbi_backend->handle_events(fds, nfds, r);
	if (r)
		usbi_err("backend handle_events failed with error %d", r);

	free(fds);
	return r;
}
1403 /* returns the smallest of:
1404 * 1. timeout of next URB
1405 * 2. user-supplied timeout
1406 * returns 1 if there is an already-expired timeout, otherwise returns 0
static int get_next_timeout(struct timeval *tv, struct timeval *out)
{
	struct timeval timeout;
	int r = libusb_get_next_timeout(&timeout);
	if (r) {
		/* timeout already expired? */
		if (!timerisset(&timeout))
			return 1;
		/* choose the smallest of next URB timeout or user specified timeout */
		if (timercmp(&timeout, tv, <))
			*out = timeout;
		else
			*out = *tv;
	} else {
		/* no pending URB timeout: just use the user-specified timeout */
		*out = *tv;
	}
	return 0;
}

/**
 * Handle any pending events.
 *
 * libusb determines "pending events" by checking if any timeouts have expired
 * and by checking the set of file descriptors for activity.
 *
 * If a zero timeval is passed, this function will handle any already-pending
 * events and then immediately return in non-blocking style.
 *
 * If a non-zero timeval is passed and no events are currently pending, this
 * function will block waiting for events to handle up until the specified
 * timeout. If an event arrives or a signal is raised, this function will
 * return early.
 *
 * \param tv the maximum time to block waiting for events, or zero for
 * non-blocking mode
 * \returns 0 on success, or a LIBUSB_ERROR code on failure
 */
API_EXPORTED int libusb_handle_events_timeout(struct timeval *tv)
{
	int r;
	struct timeval poll_timeout;

	r = get_next_timeout(tv, &poll_timeout);
	if (r) {
		/* timeout already expired */
		return handle_timeouts();
	}

retry:
	if (libusb_try_lock_events() == 0) {
		/* we obtained the event lock: do our own event handling */
		r = handle_events(&poll_timeout);
		libusb_unlock_events();
		return r;
	}

	/* another thread is doing event handling. wait for pthread events that
	 * notify event completion. */
	libusb_lock_event_waiters();

	if (!libusb_event_handler_active()) {
		/* we hit a race: whoever was event handling earlier finished in the
		 * time it took us to reach this point. try the cycle again. */
		libusb_unlock_event_waiters();
		usbi_dbg("event handler was active but went away, retrying");
		goto retry;
	}

	usbi_dbg("another thread is doing event handling");
	r = libusb_wait_for_event(&poll_timeout);
	libusb_unlock_event_waiters();

	if (r < 0)
		return r;
	else if (r == 1)
		return handle_timeouts();
	else
		return 0;
}
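
/* Illustrative sketch (not part of the library source): once your own
 * poll()/select() loop reports activity on libusb's file descriptors, or a
 * libusb timeout has expired, pending events can be serviced without blocking
 * by passing a zero timeval. Error handling is minimal for brevity.
 *
 *	struct timeval zero_tv = { 0, 0 };
 *	int r = libusb_handle_events_timeout(&zero_tv);
 *	if (r < 0)
 *		fprintf(stderr, "libusb event handling failed: %d\n", r);
 */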

/**
 * Handle any pending events in blocking mode with a sensible timeout. This
 * timeout is currently hardcoded at 2 seconds but we may change this if we
 * decide other values are more sensible. For finer control over whether this
 * function is blocking or non-blocking, or the maximum timeout, use
 * libusb_handle_events_timeout() instead.
 *
 * \returns 0 on success, or a LIBUSB_ERROR code on failure
 */
API_EXPORTED int libusb_handle_events(void)
{
	struct timeval tv;
	tv.tv_sec = 2;
	tv.tv_usec = 0;
	return libusb_handle_events_timeout(&tv);
}
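
/* Illustrative sketch (not part of the library source): a dedicated
 * event-handling thread is one common way to drive the asynchronous API.
 * "do_exit" is a hypothetical flag that the application sets when it wants
 * the thread to stop.
 *
 *	static void *event_thread_main(void *arg)
 *	{
 *		while (!do_exit)
 *			libusb_handle_events();
 *		return NULL;
 *	}
 */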

/**
 * Handle any pending events by polling file descriptors, without checking if
 * any other threads are already doing so. Must be called with the event lock
 * held, see libusb_lock_events().
 *
 * This function is designed to be called under the situation where you have
 * taken the event lock and are calling poll()/select() directly on libusb's
 * file descriptors (as opposed to using libusb_handle_events() or similar).
 * You detect events on libusb's descriptors, so you then call this function
 * with a zero timeout value (while still holding the event lock).
 *
 * \param tv the maximum time to block waiting for events, or zero for
 * non-blocking mode
 * \returns 0 on success, or a LIBUSB_ERROR code on failure
 */
API_EXPORTED int libusb_handle_events_locked(struct timeval *tv)
{
	int r;
	struct timeval poll_timeout;

	r = get_next_timeout(tv, &poll_timeout);
	if (r) {
		/* timeout already expired */
		return handle_timeouts();
	}

	return handle_events(&poll_timeout);
}
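
/* Illustrative sketch (not part of the library source): the pattern described
 * above - take the event lock, poll libusb's descriptors yourself, then let
 * libusb process whatever became pending without blocking. "fds" and "nfds"
 * are assumed to have been built from libusb_get_pollfds(); a real loop would
 * also bound the poll timeout using libusb_get_next_timeout().
 *
 *	struct timeval zero_tv = { 0, 0 };
 *
 *	libusb_lock_events();
 *	if (poll(fds, nfds, -1) > 0)
 *		libusb_handle_events_locked(&zero_tv);
 *	libusb_unlock_events();
 */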

/**
 * Determine the next internal timeout that libusb needs to handle. You only
 * need to use this function if you are calling poll() or select() or similar
 * on libusb's file descriptors yourself - you do not need to use it if you
 * are calling libusb_handle_events() or a variant directly.
 *
 * You should call this function in your main loop in order to determine how
 * long to wait for select() or poll() to return results. libusb needs to be
 * called into at this timeout, so you should use it as an upper bound on
 * your select() or poll() call.
 *
 * When the timeout has expired, call into libusb_handle_events_timeout()
 * (perhaps in non-blocking mode) so that libusb can handle the timeout.
 *
 * This function may return 1 (success) and an all-zero timeval. If this is
 * the case, it indicates that libusb has a timeout that has already expired
 * so you should call libusb_handle_events_timeout() or similar immediately.
 * A return code of 0 indicates that there are no pending timeouts.
 *
 * \param tv output location for a relative time against the current
 * clock in which libusb must be called into in order to process timeout events
 * \returns 0 if there are no pending timeouts, 1 if a timeout was returned,
 * or LIBUSB_ERROR_OTHER on failure
 */
API_EXPORTED int libusb_get_next_timeout(struct timeval *tv)
{
	struct usbi_transfer *transfer;
	struct timespec cur_ts;
	struct timeval cur_tv;
	struct timeval *next_timeout;
	int r;
	int found = 0;

	pthread_mutex_lock(&flying_transfers_lock);
	if (list_empty(&flying_transfers)) {
		pthread_mutex_unlock(&flying_transfers_lock);
		usbi_dbg("no URBs, no timeout!");
		return 0;
	}

	/* find next transfer which hasn't already been processed as timed out */
	list_for_each_entry(transfer, &flying_transfers, list) {
		if (!(transfer->flags & USBI_TRANSFER_TIMED_OUT)) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&flying_transfers_lock);

	if (!found) {
		usbi_dbg("all URBs have already been processed for timeouts");
		return 0;
	}

	next_timeout = &transfer->timeout;

	/* no timeout for next transfer */
	if (!timerisset(next_timeout)) {
		usbi_dbg("no URBs with timeouts, no timeout!");
		return 0;
	}

	r = clock_gettime(CLOCK_MONOTONIC, &cur_ts);
	if (r < 0) {
		usbi_err("failed to read monotonic clock, errno=%d", errno);
		return LIBUSB_ERROR_OTHER;
	}
	TIMESPEC_TO_TIMEVAL(&cur_tv, &cur_ts);

	if (timercmp(&cur_tv, next_timeout, >=)) {
		usbi_dbg("first timeout already expired");
		timerclear(tv);
	} else {
		timersub(next_timeout, &cur_tv, tv);
		usbi_dbg("next timeout in %d.%06ds", (int) tv->tv_sec, (int) tv->tv_usec);
	}

	return 1;
}
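
/* Illustrative sketch (not part of the library source): using the relative
 * timeout to bound your own select() call. "max_fd" and "readfds" are
 * hypothetical values built from libusb_get_pollfds() plus your own
 * descriptors.
 *
 *	struct timeval tv;
 *	struct timeval zero_tv = { 0, 0 };
 *	int r = libusb_get_next_timeout(&tv);
 *
 *	if (r == 1 && !timerisset(&tv)) {
 *		// a libusb timeout has already expired: service it immediately
 *		libusb_handle_events_timeout(&zero_tv);
 *	} else {
 *		// pass &tv if a timeout is pending, or NULL to block indefinitely
 *		select(max_fd + 1, &readfds, NULL, NULL, r == 1 ? &tv : NULL);
 *	}
 */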

/**
 * Register notification functions for file descriptor additions/removals.
 * These functions will be invoked for every new or removed file descriptor
 * that libusb uses as an event source.
 *
 * To remove notifiers, pass NULL values for the function pointers.
 *
 * \param added_cb pointer to function for addition notifications
 * \param removed_cb pointer to function for removal notifications
 */
API_EXPORTED void libusb_set_pollfd_notifiers(libusb_pollfd_added_cb added_cb,
	libusb_pollfd_removed_cb removed_cb)
{
	fd_added_cb = added_cb;
	fd_removed_cb = removed_cb;
}
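
/* Illustrative sketch (not part of the library source): hypothetical notifier
 * callbacks used to keep an external event loop in sync with libusb's
 * descriptor set, assuming the added callback receives (int fd, short events)
 * and the removed callback receives (int fd). my_loop_add_fd() and
 * my_loop_remove_fd() are hypothetical application helpers.
 *
 *	static void my_fd_added(int fd, short events)
 *	{
 *		my_loop_add_fd(fd, events);
 *	}
 *
 *	static void my_fd_removed(int fd)
 *	{
 *		my_loop_remove_fd(fd);
 *	}
 *
 *	libusb_set_pollfd_notifiers(my_fd_added, my_fd_removed);
 */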

/* Add a file descriptor to the list of file descriptors to be monitored.
 * events should be specified as a bitmask of events passed to poll(), e.g.
 * POLLIN and/or POLLOUT. */
int usbi_add_pollfd(int fd, short events)
{
	struct usbi_pollfd *ipollfd = malloc(sizeof(*ipollfd));
	if (!ipollfd)
		return LIBUSB_ERROR_NO_MEM;

	usbi_dbg("add fd %d events %d", fd, events);
	ipollfd->pollfd.fd = fd;
	ipollfd->pollfd.events = events;
	pthread_mutex_lock(&pollfds_lock);
	list_add(&ipollfd->list, &pollfds);
	pthread_mutex_unlock(&pollfds_lock);

	if (fd_added_cb)
		fd_added_cb(fd, events);
	return 0;
}

/* Remove a file descriptor from the list of file descriptors to be polled. */
void usbi_remove_pollfd(int fd)
{
	struct usbi_pollfd *ipollfd;
	int found = 0;

	usbi_dbg("remove fd %d", fd);
	pthread_mutex_lock(&pollfds_lock);
	list_for_each_entry(ipollfd, &pollfds, list)
		if (ipollfd->pollfd.fd == fd) {
			found = 1;
			break;
		}

	if (!found) {
		usbi_dbg("couldn't find fd %d to remove", fd);
		pthread_mutex_unlock(&pollfds_lock);
		return;
	}

	list_del(&ipollfd->list);
	pthread_mutex_unlock(&pollfds_lock);
	free(ipollfd);
	if (fd_removed_cb)
		fd_removed_cb(fd);
}

/**
 * Retrieve a list of file descriptors that should be polled by your main loop
 * as libusb event sources.
 *
 * The returned list is NULL-terminated and should be freed with free() when
 * done. The actual list contents must not be touched.
 *
 * \returns a NULL-terminated list of libusb_pollfd structures, or NULL on
 * error
 */
API_EXPORTED const struct libusb_pollfd **libusb_get_pollfds(void)
{
	struct libusb_pollfd **ret = NULL;
	struct usbi_pollfd *ipollfd;
	size_t i = 0;
	size_t cnt = 0;

	pthread_mutex_lock(&pollfds_lock);
	list_for_each_entry(ipollfd, &pollfds, list)
		cnt++;

	ret = calloc(cnt + 1, sizeof(struct libusb_pollfd *));
	if (!ret)
		goto out;

	list_for_each_entry(ipollfd, &pollfds, list)
		ret[i++] = (struct libusb_pollfd *) ipollfd;

out:
	pthread_mutex_unlock(&pollfds_lock);
	return (const struct libusb_pollfd **) ret;
}
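
/* Illustrative sketch (not part of the library source): building a pollfd
 * array from the NULL-terminated list returned by libusb_get_pollfds().
 * MAX_FDS is a hypothetical application-defined limit; error handling is
 * omitted for brevity.
 *
 *	const struct libusb_pollfd **lfds = libusb_get_pollfds();
 *	struct pollfd fds[MAX_FDS];
 *	nfds_t nfds = 0;
 *	int i;
 *
 *	for (i = 0; lfds && lfds[i] && nfds < MAX_FDS; i++) {
 *		fds[nfds].fd = lfds[i]->fd;
 *		fds[nfds].events = lfds[i]->events;
 *		fds[nfds].revents = 0;
 *		nfds++;
 *	}
 *	free((void *) lfds);
 */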

/* Backends call this from handle_events to report disconnection of a device.
 * The transfers get cancelled appropriately.
 */
void usbi_handle_disconnect(struct libusb_device_handle *handle)
{
	struct usbi_transfer *cur;
	struct usbi_transfer *to_cancel;

	usbi_dbg("device %d.%d",
		handle->dev->bus_number, handle->dev->device_address);

	/* terminate all pending transfers with the LIBUSB_TRANSFER_NO_DEVICE
	 * status code.
	 *
	 * this is a bit tricky because:
	 * 1. we can't do transfer completion while holding flying_transfers_lock
	 * 2. the transfers list can change underneath us - if we were to build a
	 *    list of transfers to complete (while holding the lock), the situation
	 *    might be different by the time we come to free them
	 *
	 * so we resort to a loop-based approach as below
	 * FIXME: is this still potentially racy?
	 */

	while (1) {
		pthread_mutex_lock(&flying_transfers_lock);
		to_cancel = NULL;
		list_for_each_entry(cur, &flying_transfers, list)
			if (__USBI_TRANSFER_TO_LIBUSB_TRANSFER(cur)->dev_handle == handle) {
				to_cancel = cur;
				break;
			}
		pthread_mutex_unlock(&flying_transfers_lock);

		if (!to_cancel)
			break;

		usbi_backend->clear_transfer_priv(to_cancel);
		usbi_handle_transfer_completion(to_cancel, LIBUSB_TRANSFER_NO_DEVICE);
	}
}