1 /* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
26 /* Stuff that reads stuff from the server. */
42 #include <sys/select.h>
43 #include <sys/socket.h>
47 #include "xcb_windefs.h"
52 #define XCB_XGE_EVENT 35
/* NOTE(review): the struct declarations below are only partially visible
 * in this extract — some typedef openers/closers fall outside the view. */
/* Node in the queue of events read from the server. */
    xcb_generic_event_t *event;
    struct event_list *next;
/* Node in a singly-linked list of reply buffers for one request. */
    struct reply_list *next;
/* Record of a request (or range of requests) whose responses are still
 * expected; used to match incoming replies/errors to their request. */
typedef struct pending_reply {
    uint64_t first_request;       /* first sequence number in the range */
    uint64_t last_request;        /* last sequence number in the range */
    enum workarounds workaround;  /* server-bug workaround to apply, if any */
    struct pending_reply *next;
/* A thread blocked waiting for the reply to one specific request. */
typedef struct reader_list {
    struct reader_list *next;
/* Unlink and wake every reader whose request sequence number is <=
 * completed: no further responses can arrive for those requests, so any
 * thread still waiting must give up. Caller holds the connection lock.
 * NOTE(review): brace lines of this function are missing from this extract. */
static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
        /* If you don't have what you're looking for now, you never
         * will. Wake up and leave me alone. */
        pthread_cond_signal((*prev_reader)->data);
        *prev_reader = (*prev_reader)->next;
/* Consume one complete packet (reply, event, or error) from the head of
 * c->in.queue and route it to the reply lists or the event queue.
 * Returns 1 when a packet was consumed (caller loops), 0 when more data
 * is needed or the connection was shut down.
 * NOTE(review): this extract omits several lines of the original
 * function (braces, declarations such as `length` and `buf`, some early
 * returns); the comments below annotate only the visible statements. */
static int read_packet(xcb_connection_t *c)
    xcb_generic_reply_t genrep;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    pending_reply *pend = 0;
    struct event_list *event;
    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));
    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify is the one event that carries no sequence number. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
        uint64_t lastread = c->in.request_read;
        /* Splice the 16-bit on-the-wire sequence number into the low bits
         * of the 64-bit running count; add 0x10000 if it wrapped. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;
        if(c->in.request_read != lastread)
            /* Responses for a new request have begun: file the previous
             * request's accumulated replies into the replies map. */
            if(c->in.current_reply)
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            /* Every request before this one is now fully answered. */
            c->in.request_completed = c->in.request_read - 1;
        /* Retire pending_reply records whose range is entirely completed
         * (external-socket-owner records are kept open deliberately). */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            c->in.pending_replies_tail = &c->in.pending_replies;
        /* An error is the final response to its request. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
        remove_finished_readers(&c->in.readers, c->in.request_completed);
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
        /* Match this response against the head pending_reply record. */
        pend = c->in.pending_replies;
        !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
          (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
           XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
            /* Server-bug workaround: recompute the reply length from
             * fields inside the GLX GetFBConfigs reply itself. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        length += genrep.length * 4;
    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * 4;
    /* Non-reply packets get an extra uint32_t for full_sequence. */
    buf = malloc(length + eventlength +
                 (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    if(_xcb_in_read_block(c, buf, length) <= 0)
    /* pull in XGE event data if available, append after event struct */
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
    /* Caller asked for this reply to be discarded: do not queue it. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;
    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
        (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
        struct reply_list *cur = malloc(sizeof(struct reply_list));
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        /* Append to the current request's reply list and wake the reader
         * (if any) that is waiting on exactly this request. */
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    /* Append to the event queue and wake any thread in wait_for_event. */
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
/* Pop the first queued event off c->in.events, or return 0 when the
 * queue is empty. The caller takes ownership of the returned event and
 * must hold the connection lock.
 * NOTE(review): brace lines and the final return are missing from this extract. */
static xcb_generic_event_t *get_event(xcb_connection_t *c)
    struct event_list *cur = c->in.events;
    xcb_generic_event_t *ret;
    c->in.events = cur->next;
    /* Queue drained: point the tail back at the list head. */
    c->in.events_tail = &c->in.events;
/* Free an entire reply_list chain, including each node's reply buffer.
 * NOTE(review): the loop body of this function is not visible in this extract. */
static void free_reply_list(struct reply_list *head)
    struct reply_list *cur = head;
/* Read exactly `len` bytes from `fd` into `buf`, blocking (via poll or
 * select, depending on USE_POLL) whenever recv reports would-block.
 * NOTE(review): this extract omits the enclosing loop, the #if/#else
 * platform lines, and the declarations of `done`/`pfd`/`fds`. */
static int read_block(const int fd, void *buf, const ssize_t len)
    int ret = recv(fd, ((char *) buf) + done, len - done, 0);
    /* Unix: EAGAIN means the socket is non-blocking and empty — wait. */
    if(ret < 0 && errno == EAGAIN)
    /* Win32 equivalent of the EAGAIN check above. */
    if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
    /* Block until readable, restarting if interrupted by a signal. */
    ret = poll(&pfd, 1, -1);
    } while (ret == -1 && errno == EINTR);
    /* Initializing errno here makes sure that for Win32 this loop will execute only once */
    ret = select(fd + 1, &fds, 0, 0, 0);
    } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
/* Non-blocking check for the response to `request`. Returns nonzero when
 * an answer is available now (reply, error, or proven absence) and zero
 * when the caller would have to block. On success the reply buffer is
 * handed to *reply or *error. Caller holds the connection lock.
 * NOTE(review): several lines (braces, the has_error check, the final
 * return paths) are missing from this extract. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
    struct reply_list *head;
    /* If an error occurred when issuing the request, fail immediately. */
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
        head = _xcb_map_remove(c->in.replies, request);
        /* Multi-reply request: put the remainder of the chain back. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        /* Took the last queued reply: reset the tail pointer. */
        c->in.current_reply_tail = &c->in.current_reply;
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
    /* We may have more replies on the way for this request: block until we're
    /* Route the buffer to *error for errors, *reply otherwise. */
    if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        *error = head->reply;
    *reply = head->reply;
/* Insert `reader` into the readers list, kept sorted by request sequence
 * number, so the reader for the oldest request is woken first.
 * NOTE(review): braces and the line storing `cond` into the reader are
 * missing from this extract. */
static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
    /* Walk past all readers waiting on earlier-or-equal requests. */
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
        prev_reader = &(*prev_reader)->next;
    reader->request = request;
    reader->next = *prev_reader;
    *prev_reader = reader;
/* Unlink `reader` from the sorted readers list, if it is still present.
 * The search stops once sequence numbers pass reader->request.
 * NOTE(review): braces and the loop's advance/break lines are missing
 * from this extract. */
static void remove_reader(reader_list **prev_reader, reader_list *reader)
    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
        if(*prev_reader == reader)
            *prev_reader = (*prev_reader)->next;
/* Block until the response to `request` arrives (or the connection
 * fails). Registers this thread as a reader, sleeps on a per-call
 * condition variable, and on exit hands the read duty to the next
 * waiter. Caller holds the connection lock.
 * NOTE(review): braces, the `ret` declaration and the `reader`
 * declaration are missing from this extract. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        insert_reader(&c->in.readers, &reader, request, &cond);
        /* Sleep until poll_for_reply can answer without blocking. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    /* Pass the baton: wake the next reader or an event waiter. */
    _xcb_in_wake_up_next_reader(c);
401 static uint64_t widen(xcb_connection_t *c, unsigned int request)
403 uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
404 if(widened_request > c->out.request)
405 widened_request -= UINT64_C(1) << 32;
406 return widened_request;
409 /* Public interface */
/* Public entry point: block until the reply to `request` arrives and
 * return it (errors delivered through *e). Widens the caller's 32-bit
 * cookie sequence to 64 bits and serializes on the connection lock.
 * NOTE(review): braces, the `ret` declaration and the final return are
 * missing from this extract. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
    pthread_mutex_lock(&c->iolock);
    ret = wait_for_reply(c, widen(c, request), e);
    pthread_mutex_unlock(&c->iolock);
/* Create a single-request pending_reply marked DISCARD_REPLY and link it
 * at *prev_next, so the response to `seq` is thrown away when it
 * arrives. Used when xcb_discard_reply targets an _unchecked request
 * that has no pending_reply record yet.
 * NOTE(review): braces and the malloc-failure return are missing from
 * this extract. */
static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
    pend = malloc(sizeof(*pend));
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    /* Single-request range [seq, seq] with no workaround. */
    pend->first_request = seq;
    pend->last_request = seq;
    pend->workaround = 0;
    pend->flags = XCB_REQUEST_DISCARD_REPLY;
    pend->next = *prev_next;
    c->in.pending_replies_tail = &pend->next;
/* Drop any current or future responses to `request`: free responses
 * already queued, then either flag the matching pending_reply for
 * discard or create a discard record if none exists. Caller holds the
 * connection lock.
 * NOTE(review): braces, the `reply` declaration and some free/return
 * lines are missing from this extract. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
    pending_reply **prev_pend;
    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
        /* List is sorted: passing `request` means no record exists. */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
        if((*prev_pend)->first_request == request)
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
/* Public entry point: discard the response to the request identified by
 * the 32-bit cookie `sequence`. Widens the sequence and serializes on
 * the connection lock.
 * NOTE(review): braces and the sequence==0 / has_error early returns are
 * missing from this extract. */
void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
    /* If an error occurred when issuing the request, fail immediately. */
    pthread_mutex_lock(&c->iolock);
    discard_reply(c, widen(c, sequence));
    pthread_mutex_unlock(&c->iolock);
/* Public entry point: non-blocking variant of xcb_wait_for_reply.
 * Returns 1 when an answer (or proven absence of one) is available now,
 * 0 when the caller would have to block.
 * NOTE(review): braces, the `ret` declaration, the has_error early-out
 * and the final return are missing from this extract. */
int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
    return 1; /* would not block */
    pthread_mutex_lock(&c->iolock);
    ret = poll_for_reply(c, widen(c, request), reply, error);
    pthread_mutex_unlock(&c->iolock);
/* Public entry point: block until an event is available and return it.
 * Caller owns (and must free) the returned event; returns 0 on
 * connection failure.
 * NOTE(review): braces, the has_error early-out and the final return are
 * missing from this extract. */
xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
    xcb_generic_event_t *ret;
    pthread_mutex_lock(&c->iolock);
    /* get_event returns 0 on empty list. */
    while(!(ret = get_event(c)))
        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
    /* Hand read duty to the next waiting reader before unlocking. */
    _xcb_in_wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
/* Shared implementation of xcb_poll_for_event/xcb_poll_for_queued_event:
 * return the next queued event without blocking; when `queued` is zero
 * and the queue is empty, attempt one non-blocking read from the socket
 * first.
 * NOTE(review): braces, the has_error check and the get_event calls are
 * missing from this extract. */
static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
    xcb_generic_event_t *ret = 0;
    pthread_mutex_lock(&c->iolock);
    /* FIXME: follow X meets Z architecture changes. */
    if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
    pthread_mutex_unlock(&c->iolock);
540 xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
542 return poll_for_next_event(c, 0);
545 xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
547 return poll_for_next_event(c, 1);
/* Public entry point: wait for the completion of the void request named
 * by `cookie` and return its error, or 0 on success. If no response is
 * expected yet, a sync round-trip is forced so completion can be proven.
 * NOTE(review): braces, the `request`/`reply` declarations and the final
 * free/return lines are missing from this extract. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
    xcb_generic_error_t *ret = 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* No response can have arrived and none is on the way: force a
     * round-trip so the request's fate becomes known. */
    if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected)
       && XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
        _xcb_out_send_sync(c);
        _xcb_out_flush_to(c, c->out.request);
    reply = wait_for_reply(c, request, &ret);
    pthread_mutex_unlock(&c->iolock);
571 /* Private interface */
/* Initialize the input-side state of a connection: condition variable,
 * sequence counters, reply map, and the three tail pointers for the
 * intrusive singly-linked lists. Returns nonzero on success.
 * NOTE(review): braces, failure returns and the final `return 1` are
 * missing from this extract. */
int _xcb_in_init(_xcb_in *in)
    if(pthread_cond_init(&in->event_cond, 0))
    in->request_read = 0;
    in->request_completed = 0;
    in->replies = _xcb_map_new();
    /* Empty lists: each tail points at its own head pointer. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;
/* Tear down the input-side state: destroy the condition variable and
 * free all queued replies, events, and pending_reply records.
 * NOTE(review): braces and the free() calls inside the loops are missing
 * from this extract. */
void _xcb_in_destroy(_xcb_in *in)
    pthread_cond_destroy(&in->event_cond);
    free_reply_list(in->current_reply);
    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
    /* Drain the event queue. */
    struct event_list *e = in->events;
    in->events = e->next;
    /* Drain the pending_reply list. */
    while(in->pending_replies)
        pending_reply *pend = in->pending_replies;
        in->pending_replies = pend->next;
/* Wake exactly one waiter to take over reading the socket: the oldest
 * registered reply reader if any, otherwise an event waiter. Asserts
 * that the signal call succeeded.
 * NOTE(review): braces, the `pthreadret` declaration and the if/else
 * lines are missing from this extract. */
void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
    pthreadret = pthread_cond_signal(c->in.readers->data);
    pthreadret = pthread_cond_signal(&c->in.event_cond);
    assert(pthreadret == 0);
/* Record that `request` expects a response: append a pending_reply with
 * the given workaround and flags to the pending list. Returns nonzero on
 * success; shuts the connection down on allocation failure.
 * NOTE(review): braces, the flags assignment and the returns are missing
 * from this extract. */
int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
    pending_reply *pend = malloc(sizeof(pending_reply));
    /* Callers must have some reason to expect a reply. */
    assert(workaround != WORKAROUND_NONE || flags != 0);
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
    /* Single-request range. */
    pend->first_request = pend->last_request = request;
    pend->workaround = workaround;
    /* Append at the tail of the pending list. */
    *c->in.pending_replies_tail = pend;
    c->in.pending_replies_tail = &pend->next;
/* Close off an external-socket-owner pending_reply: when the socket is
 * handed back to XCB, the last list entry (if it is the external-owner
 * placeholder) is bounded at the current request and its workaround
 * cleared so it can be retired normally.
 * NOTE(review): brace lines are missing from this extract. */
void _xcb_in_replies_done(xcb_connection_t *c)
    struct pending_reply *pend;
    /* Non-empty list: tail points into the last node's `next` field. */
    if (c->in.pending_replies_tail != &c->in.pending_replies)
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
/* Non-blocking read: pull whatever is available from the socket into
 * c->in.queue and consume every complete packet. Returns nonzero while
 * the connection remains healthy; shuts the connection down on error.
 * NOTE(review): braces, the #if/#else platform lines and the returns are
 * missing from this extract. */
int _xcb_in_read(xcb_connection_t *c)
    int n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
    c->in.queue_len += n;
    /* Parse packets until a partial one remains. */
    while(read_packet(c))
    /* Unix: EAGAIN on a non-blocking socket is not an error. */
    if((n > 0) || (n < 0 && errno == EAGAIN))
    /* Win32 equivalent of the check above. */
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
/* Read exactly `len` bytes into `buf`: first drain up to `len` bytes
 * already buffered in c->in.queue, then read the remainder from the
 * socket via read_block. Shuts the connection down on read failure.
 * NOTE(review): this function continues past the end of this extract;
 * only the visible portion is annotated. */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
    int done = c->in.queue_len;
    /* Copy buffered bytes out and compact the queue. */
    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
    /* Fetch whatever is still missing directly from the socket. */
    int ret = read_block(c->fd, (char *) buf + done, len - done);
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);