/* Copyright 1998 by the Massachusetts Institute of Technology.
 * Copyright (C) 2004-2010 by Daniel Stenberg
 *
 * Permission to use, copy, modify, and distribute this
 * software and its documentation for any purpose and without
 * fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting
 * documentation, and that the name of M.I.T. not be used in
 * advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.
 * M.I.T. makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 */
#include "ares_setup.h"

#ifdef HAVE_SYS_SOCKET_H
#  include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#  include <netinet/in.h>
#endif
#ifdef HAVE_NETINET_TCP_H
#  include <netinet/tcp.h>
#endif
#ifdef HAVE_ARPA_NAMESER_H
#  include <arpa/nameser.h>
#endif
#ifdef HAVE_ARPA_NAMESER_COMPAT_H
#  include <arpa/nameser_compat.h>
#endif
#ifdef HAVE_SYS_TIME_H
#  include <sys/time.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#  include <sys/ioctl.h>
#endif
#ifdef NETWARE
#  include <sys/filio.h>
#endif

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <time.h>

#include "ares.h"
#include "ares_dns.h"
#include "ares_nowarn.h"
#include "ares_private.h"
73 static int try_again(int errnum);
74 static void write_tcp_data(ares_channel channel, fd_set *write_fds,
75 ares_socket_t write_fd, struct timeval *now);
76 static void read_tcp_data(ares_channel channel, fd_set *read_fds,
77 ares_socket_t read_fd, struct timeval *now);
78 static void read_udp_packets(ares_channel channel, fd_set *read_fds,
79 ares_socket_t read_fd, struct timeval *now);
80 static void advance_tcp_send_queue(ares_channel channel, int whichserver,
82 static void process_timeouts(ares_channel channel, struct timeval *now);
83 static void process_broken_connections(ares_channel channel,
85 static void process_answer(ares_channel channel, unsigned char *abuf,
86 int alen, int whichserver, int tcp,
88 static void handle_error(ares_channel channel, int whichserver,
90 static void skip_server(ares_channel channel, struct query *query,
92 static void next_server(ares_channel channel, struct query *query,
94 static int open_tcp_socket(ares_channel channel, struct server_state *server);
95 static int open_udp_socket(ares_channel channel, struct server_state *server);
96 static int same_questions(const unsigned char *qbuf, int qlen,
97 const unsigned char *abuf, int alen);
98 static int same_address(struct sockaddr *sa, struct ares_addr *aa);
99 static void end_query(ares_channel channel, struct query *query, int status,
100 unsigned char *abuf, int alen);
/* Return true (1) if 'now' is at or past 'check', else 0.
 * Seconds are compared first; only when they tie do we compare the
 * sub-second parts. */
int ares__timedout(struct timeval *now,
                   struct timeval *check)
{
  long secs = (long)(now->tv_sec - check->tv_sec);

  if(secs > 0)
    return 1; /* yes, timed out */
  if(secs < 0)
    return 0; /* nope, not timed out */

  /* if the full seconds were identical, check the sub second parts */
  return (now->tv_usec - check->tv_usec >= 0);
}
/* Add the given number of milliseconds to the time in the first argument.
 * Normalizes tv_usec back into [0, 1000000); always returns 0. */
int ares__timeadd(struct timeval *now,
                  int millisecs)
{
  now->tv_sec += millisecs/1000;
  now->tv_usec += (millisecs%1000)*1000;

  /* carry any microsecond overflow into the seconds field */
  if(now->tv_usec >= 1000000) {
    ++(now->tv_sec);
    now->tv_usec -= 1000000;
  }

  return 0;
}
/* Return the offset between 'now' and the (future) time 'check', in
 * milliseconds. Negative if 'check' is already in the past. */
long ares__timeoffset(struct timeval *now,
                      struct timeval *check)
{
  return (check->tv_sec - now->tv_sec)*1000 +
         (check->tv_usec - now->tv_usec)/1000;
}
142 * generic process function
144 static void processfds(ares_channel channel,
145 fd_set *read_fds, ares_socket_t read_fd,
146 fd_set *write_fds, ares_socket_t write_fd)
148 struct timeval now = ares__tvnow();
150 write_tcp_data(channel, write_fds, write_fd, &now);
151 read_tcp_data(channel, read_fds, read_fd, &now);
152 read_udp_packets(channel, read_fds, read_fd, &now);
153 process_timeouts(channel, &now);
154 process_broken_connections(channel, &now);
157 /* Something interesting happened on the wire, or there was a timeout.
158 * See what's up and respond accordingly.
160 void ares_process(ares_channel channel, fd_set *read_fds, fd_set *write_fds)
162 processfds(channel, read_fds, ARES_SOCKET_BAD, write_fds, ARES_SOCKET_BAD);
165 /* Something interesting happened on the wire, or there was a timeout.
166 * See what's up and respond accordingly.
168 void ares_process_fd(ares_channel channel,
169 ares_socket_t read_fd, /* use ARES_SOCKET_BAD or valid
171 ares_socket_t write_fd)
173 processfds(channel, NULL, read_fd, NULL, write_fd);
/* Return 1 if the specified error number describes a readiness error, or 0
 * otherwise. This is mostly for HP-UX, which could return EAGAIN or
 * EWOULDBLOCK. See this man page
 *
 *      http://devrsrc1.external.hp.com/STKS/cgi-bin/man2html?
 *        manpage=/usr/share/man/man2.Z/send.2
 */
static int try_again(int errnum)
{
#if !defined EWOULDBLOCK && !defined EAGAIN
#error "Neither EWOULDBLOCK nor EAGAIN defined"
#endif

#ifdef EWOULDBLOCK
  if (errnum == EWOULDBLOCK)
    return 1;
#endif

  /* only test EAGAIN separately when it is a distinct value */
#if defined EAGAIN && EAGAIN != EWOULDBLOCK
  if (errnum == EAGAIN)
    return 1;
#endif

  return 0;
}
203 /* If any TCP sockets select true for writing, write out queued data
206 static void write_tcp_data(ares_channel channel,
208 ares_socket_t write_fd,
211 struct server_state *server;
212 struct send_request *sendreq;
219 if(!write_fds && (write_fd == ARES_SOCKET_BAD))
220 /* no possible action */
223 for (i = 0; i < channel->nservers; i++)
225 /* Make sure server has data to send and is selected in write_fds or
227 server = &channel->servers[i];
228 if (!server->qhead || server->tcp_socket == ARES_SOCKET_BAD ||
233 if(!FD_ISSET(server->tcp_socket, write_fds))
237 if(server->tcp_socket != write_fd)
242 /* If there's an error and we close this socket, then open
243 * another with the same fd to talk to another server, then we
244 * don't want to think that it was the new socket that was
245 * ready. This is not disastrous, but is likely to result in
246 * extra system calls and confusion. */
247 FD_CLR(server->tcp_socket, write_fds);
249 /* Count the number of send queue items. */
251 for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
254 /* Allocate iovecs so we can send all our data at once. */
255 vec = malloc(n * sizeof(struct iovec));
258 /* Fill in the iovecs and send. */
260 for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
262 vec[n].iov_base = (char *) sendreq->data;
263 vec[n].iov_len = sendreq->len;
266 wcount = (ssize_t)writev(server->tcp_socket, vec, (int)n);
270 if (!try_again(SOCKERRNO))
271 handle_error(channel, i, now);
275 /* Advance the send queue by as many bytes as we sent. */
276 advance_tcp_send_queue(channel, i, wcount);
280 /* Can't allocate iovecs; just send the first request. */
281 sendreq = server->qhead;
283 scount = swrite(server->tcp_socket, sendreq->data, sendreq->len);
286 if (!try_again(SOCKERRNO))
287 handle_error(channel, i, now);
291 /* Advance the send queue by as many bytes as we sent. */
292 advance_tcp_send_queue(channel, i, scount);
297 /* Consume the given number of bytes from the head of the TCP send queue. */
298 static void advance_tcp_send_queue(ares_channel channel, int whichserver,
301 struct send_request *sendreq;
302 struct server_state *server = &channel->servers[whichserver];
303 while (num_bytes > 0) {
304 sendreq = server->qhead;
305 if ((size_t)num_bytes >= sendreq->len) {
306 num_bytes -= sendreq->len;
307 server->qhead = sendreq->next;
308 if (sendreq->data_storage)
309 free(sendreq->data_storage);
311 if (server->qhead == NULL) {
312 SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 0);
313 server->qtail = NULL;
315 /* qhead is NULL so we cannot continue this loop */
320 sendreq->data += num_bytes;
321 sendreq->len -= num_bytes;
327 /* If any TCP socket selects true for reading, read some data,
328 * allocate a buffer if we finish reading the length word, and process
329 * a packet if we finish reading one.
331 static void read_tcp_data(ares_channel channel, fd_set *read_fds,
332 ares_socket_t read_fd, struct timeval *now)
334 struct server_state *server;
338 if(!read_fds && (read_fd == ARES_SOCKET_BAD))
339 /* no possible action */
342 for (i = 0; i < channel->nservers; i++)
344 /* Make sure the server has a socket and is selected in read_fds. */
345 server = &channel->servers[i];
346 if (server->tcp_socket == ARES_SOCKET_BAD || server->is_broken)
350 if(!FD_ISSET(server->tcp_socket, read_fds))
354 if(server->tcp_socket != read_fd)
359 /* If there's an error and we close this socket, then open
360 * another with the same fd to talk to another server, then we
361 * don't want to think that it was the new socket that was
362 * ready. This is not disastrous, but is likely to result in
363 * extra system calls and confusion. */
364 FD_CLR(server->tcp_socket, read_fds);
366 if (server->tcp_lenbuf_pos != 2)
368 /* We haven't yet read a length word, so read that (or
369 * what's left to read of it).
371 count = sread(server->tcp_socket,
372 server->tcp_lenbuf + server->tcp_lenbuf_pos,
373 2 - server->tcp_lenbuf_pos);
376 if (!(count == -1 && try_again(SOCKERRNO)))
377 handle_error(channel, i, now);
381 server->tcp_lenbuf_pos += (int)count;
382 if (server->tcp_lenbuf_pos == 2)
384 /* We finished reading the length word. Decode the
385 * length and allocate a buffer for the data.
387 server->tcp_length = server->tcp_lenbuf[0] << 8
388 | server->tcp_lenbuf[1];
389 server->tcp_buffer = malloc(server->tcp_length);
390 if (!server->tcp_buffer)
391 handle_error(channel, i, now);
392 server->tcp_buffer_pos = 0;
397 /* Read data into the allocated buffer. */
398 count = sread(server->tcp_socket,
399 server->tcp_buffer + server->tcp_buffer_pos,
400 server->tcp_length - server->tcp_buffer_pos);
403 if (!(count == -1 && try_again(SOCKERRNO)))
404 handle_error(channel, i, now);
408 server->tcp_buffer_pos += (int)count;
409 if (server->tcp_buffer_pos == server->tcp_length)
411 /* We finished reading this answer; process it and
412 * prepare to read another length word.
414 process_answer(channel, server->tcp_buffer, server->tcp_length,
416 if (server->tcp_buffer)
417 free(server->tcp_buffer);
418 server->tcp_buffer = NULL;
419 server->tcp_lenbuf_pos = 0;
420 server->tcp_buffer_pos = 0;
426 /* If any UDP sockets select true for reading, process them. */
427 static void read_udp_packets(ares_channel channel, fd_set *read_fds,
428 ares_socket_t read_fd, struct timeval *now)
430 struct server_state *server;
433 unsigned char buf[PACKETSZ + 1];
435 ares_socklen_t fromlen;
438 struct sockaddr_in sa4;
439 struct sockaddr_in6 sa6;
443 if(!read_fds && (read_fd == ARES_SOCKET_BAD))
444 /* no possible action */
447 for (i = 0; i < channel->nservers; i++)
449 /* Make sure the server has a socket and is selected in read_fds. */
450 server = &channel->servers[i];
452 if (server->udp_socket == ARES_SOCKET_BAD || server->is_broken)
456 if(!FD_ISSET(server->udp_socket, read_fds))
460 if(server->udp_socket != read_fd)
465 /* If there's an error and we close this socket, then open
466 * another with the same fd to talk to another server, then we
467 * don't want to think that it was the new socket that was
468 * ready. This is not disastrous, but is likely to result in
469 * extra system calls and confusion. */
470 FD_CLR(server->udp_socket, read_fds);
472 /* To reduce event loop overhead, read and process as many
473 * packets as we can. */
476 if (server->addr.family == AF_INET)
477 fromlen = sizeof(from.sa4);
479 fromlen = sizeof(from.sa6);
480 count = (ssize_t)recvfrom(server->udp_socket, (void *)buf, sizeof(buf),
481 0, &from.sa, &fromlen);
483 count = sread(server->udp_socket, buf, sizeof(buf));
485 if (count == -1 && try_again(SOCKERRNO))
488 handle_error(channel, i, now);
490 else if (!same_address(&from.sa, &server->addr))
491 /* The address the response comes from does not match
492 * the address we sent the request to. Someone may be
493 * attempting to perform a cache poisoning attack. */
497 process_answer(channel, buf, (int)count, i, 0, now);
502 /* If any queries have timed out, note the timeout and move them on. */
503 static void process_timeouts(ares_channel channel, struct timeval *now)
505 time_t t; /* the time of the timeouts we're processing */
507 struct list_node* list_head;
508 struct list_node* list_node;
510 /* Process all the timeouts that have fired since the last time we
511 * processed timeouts. If things are going well, then we'll have
512 * hundreds/thousands of queries that fall into future buckets, and
513 * only a handful of requests that fall into the "now" bucket, so
514 * this should be quite quick.
516 for (t = channel->last_timeout_processed; t <= now->tv_sec; t++)
518 list_head = &(channel->queries_by_timeout[t % ARES_TIMEOUT_TABLE_SIZE]);
519 for (list_node = list_head->next; list_node != list_head; )
521 query = list_node->data;
522 list_node = list_node->next; /* in case the query gets deleted */
523 if (query->timeout.tv_sec && ares__timedout(now, &query->timeout))
525 query->error_status = ARES_ETIMEOUT;
527 next_server(channel, query, now);
531 channel->last_timeout_processed = now->tv_sec;
534 /* Handle an answer from a server. */
535 static void process_answer(ares_channel channel, unsigned char *abuf,
536 int alen, int whichserver, int tcp,
542 struct list_node* list_head;
543 struct list_node* list_node;
545 /* If there's no room in the answer for a header, we can't do much
550 /* Grab the query ID, truncate bit, and response code from the packet. */
551 id = DNS_HEADER_QID(abuf);
552 tc = DNS_HEADER_TC(abuf);
553 rcode = DNS_HEADER_RCODE(abuf);
555 /* Find the query corresponding to this packet. The queries are
556 * hashed/bucketed by query id, so this lookup should be quick.
557 * Note that both the query id and the questions must be the same;
558 * when the query id wraps around we can have multiple outstanding
559 * queries with the same query id, so we need to check both the id and
563 list_head = &(channel->queries_by_qid[id % ARES_QID_TABLE_SIZE]);
564 for (list_node = list_head->next; list_node != list_head;
565 list_node = list_node->next)
567 struct query *q = list_node->data;
568 if ((q->qid == id) && same_questions(q->qbuf, q->qlen, abuf, alen))
577 /* If we got a truncated UDP packet and are not ignoring truncation,
578 * don't accept the packet, and switch the query to TCP if we hadn't
581 if ((tc || alen > PACKETSZ) && !tcp && !(channel->flags & ARES_FLAG_IGNTC))
583 if (!query->using_tcp)
585 query->using_tcp = 1;
586 ares__send_query(channel, query, now);
591 /* Limit alen to PACKETSZ if we aren't using TCP (only relevant if we
592 * are ignoring truncation.
594 if (alen > PACKETSZ && !tcp)
597 /* If we aren't passing through all error packets, discard packets
598 * with SERVFAIL, NOTIMP, or REFUSED response codes.
600 if (!(channel->flags & ARES_FLAG_NOCHECKRESP))
602 if (rcode == SERVFAIL || rcode == NOTIMP || rcode == REFUSED)
604 skip_server(channel, query, whichserver);
605 if (query->server == whichserver)
606 next_server(channel, query, now);
611 end_query(channel, query, ARES_SUCCESS, abuf, alen);
614 /* Close all the connections that are no longer usable. */
615 static void process_broken_connections(ares_channel channel,
619 for (i = 0; i < channel->nservers; i++)
621 struct server_state *server = &channel->servers[i];
622 if (server->is_broken)
624 handle_error(channel, i, now);
629 static void handle_error(ares_channel channel, int whichserver,
632 struct server_state *server;
634 struct list_node list_head;
635 struct list_node* list_node;
637 server = &channel->servers[whichserver];
639 /* Reset communications with this server. */
640 ares__close_sockets(channel, server);
642 /* Tell all queries talking to this server to move on and not try
643 * this server again. We steal the current list of queries that were
644 * in-flight to this server, since when we call next_server this can
645 * cause the queries to be re-sent to this server, which will
646 * re-insert these queries in that same server->queries_to_server
649 ares__init_list_head(&list_head);
650 ares__swap_lists(&list_head, &(server->queries_to_server));
651 for (list_node = list_head.next; list_node != &list_head; )
653 query = list_node->data;
654 list_node = list_node->next; /* in case the query gets deleted */
655 assert(query->server == whichserver);
656 skip_server(channel, query, whichserver);
657 next_server(channel, query, now);
659 /* Each query should have removed itself from our temporary list as
660 * it re-sent itself or finished up...
662 assert(ares__is_list_empty(&list_head));
665 static void skip_server(ares_channel channel, struct query *query,
667 /* The given server gave us problems with this query, so if we have
668 * the luxury of using other servers, then let's skip the
669 * potentially broken server and just use the others. If we only
670 * have one server and we need to retry then we should just go ahead
671 * and re-use that server, since it's our only hope; perhaps we
672 * just got unlucky, and retrying will work (eg, the server timed
673 * out our TCP connection just as we were sending another request).
675 if (channel->nservers > 1)
677 query->server_info[whichserver].skip_server = 1;
681 static void next_server(ares_channel channel, struct query *query,
684 /* We need to try each server channel->tries times. We have channel->nservers
685 * servers to try. In total, we need to do channel->nservers * channel->tries
686 * attempts. Use query->try to remember how many times we already attempted
687 * this query. Use modular arithmetic to find the next server to try. */
688 while (++(query->try_count) < (channel->nservers * channel->tries))
690 struct server_state *server;
692 /* Move on to the next server. */
693 query->server = (query->server + 1) % channel->nservers;
694 server = &channel->servers[query->server];
696 /* We don't want to use this server if (1) we decided this
697 * connection is broken, and thus about to be closed, (2)
698 * we've decided to skip this server because of earlier
699 * errors we encountered, or (3) we already sent this query
700 * over this exact connection.
702 if (!server->is_broken &&
703 !query->server_info[query->server].skip_server &&
704 !(query->using_tcp &&
705 (query->server_info[query->server].tcp_connection_generation ==
706 server->tcp_connection_generation)))
708 ares__send_query(channel, query, now);
712 /* You might think that with TCP we only need one try. However,
713 * even when using TCP, servers can time-out our connection just
714 * as we're sending a request, or close our connection because
715 * they die, or never send us a reply because they get wedged or
716 * tickle a bug that drops our request.
720 /* If we are here, all attempts to perform query failed. */
721 end_query(channel, query, query->error_status, NULL, 0);
724 void ares__send_query(ares_channel channel, struct query *query,
727 struct send_request *sendreq;
728 struct server_state *server;
731 server = &channel->servers[query->server];
732 if (query->using_tcp)
734 /* Make sure the TCP socket for this server is set up and queue
737 if (server->tcp_socket == ARES_SOCKET_BAD)
739 if (open_tcp_socket(channel, server) == -1)
741 skip_server(channel, query, query->server);
742 next_server(channel, query, now);
746 sendreq = calloc(1, sizeof(struct send_request));
749 end_query(channel, query, ARES_ENOMEM, NULL, 0);
752 /* To make the common case fast, we avoid copies by using the
753 * query's tcpbuf for as long as the query is alive. In the rare
754 * case where the query ends while it's queued for transmission,
755 * then we give the sendreq its own copy of the request packet
756 * and put it in sendreq->data_storage.
758 sendreq->data_storage = NULL;
759 sendreq->data = query->tcpbuf;
760 sendreq->len = query->tcplen;
761 sendreq->owner_query = query;
762 sendreq->next = NULL;
764 server->qtail->next = sendreq;
767 SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 1);
768 server->qhead = sendreq;
770 server->qtail = sendreq;
771 query->server_info[query->server].tcp_connection_generation =
772 server->tcp_connection_generation;
776 if (server->udp_socket == ARES_SOCKET_BAD)
778 if (open_udp_socket(channel, server) == -1)
780 skip_server(channel, query, query->server);
781 next_server(channel, query, now);
785 if (swrite(server->udp_socket, query->qbuf, query->qlen) == -1)
787 /* FIXME: Handle EAGAIN here since it likely can happen. */
788 skip_server(channel, query, query->server);
789 next_server(channel, query, now);
793 timeplus = channel->timeout << (query->try_count / channel->nservers);
794 timeplus = (timeplus * (9 + (rand () & 7))) / 16;
795 query->timeout = *now;
796 ares__timeadd(&query->timeout,
798 /* Keep track of queries bucketed by timeout, so we can process
799 * timeout events quickly.
801 ares__remove_from_list(&(query->queries_by_timeout));
802 ares__insert_in_list(
803 &(query->queries_by_timeout),
804 &(channel->queries_by_timeout[query->timeout.tv_sec %
805 ARES_TIMEOUT_TABLE_SIZE]));
807 /* Keep track of queries bucketed by server, so we can process server
810 ares__remove_from_list(&(query->queries_to_server));
811 ares__insert_in_list(&(query->queries_to_server),
812 &(server->queries_to_server));
816 * setsocknonblock sets the given socket to either blocking or non-blocking
817 * mode based on the 'nonblock' boolean argument. This function is highly
820 static int setsocknonblock(ares_socket_t sockfd, /* operate on this */
821 int nonblock /* TRUE or FALSE */)
823 #if defined(USE_BLOCKING_SOCKETS)
825 return 0; /* returns success */
827 #elif defined(HAVE_FCNTL_O_NONBLOCK)
829 /* most recent unix versions */
831 flags = fcntl(sockfd, F_GETFL, 0);
832 if (FALSE != nonblock)
833 return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
835 return fcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK));
837 #elif defined(HAVE_IOCTL_FIONBIO)
839 /* older unix versions */
842 return ioctl(sockfd, FIONBIO, &flags);
844 #elif defined(HAVE_IOCTLSOCKET_FIONBIO)
853 return ioctlsocket(sockfd, FIONBIO, &flags);
855 #elif defined(HAVE_IOCTLSOCKET_CAMEL_FIONBIO)
858 return IoctlSocket(sockfd, FIONBIO, (long)nonblock);
860 #elif defined(HAVE_SETSOCKOPT_SO_NONBLOCK)
863 long b = nonblock ? 1 : 0;
864 return setsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b));
867 # error "no non-blocking method was found/used/set"
871 static int configure_socket(ares_socket_t s, int family, ares_channel channel)
875 struct sockaddr_in sa4;
876 struct sockaddr_in6 sa6;
879 setsocknonblock(s, TRUE);
881 #if defined(FD_CLOEXEC) && !defined(MSDOS)
882 /* Configure the socket fd as close-on-exec. */
883 if (fcntl(s, F_SETFD, FD_CLOEXEC) == -1)
887 /* Set the socket's send and receive buffer sizes. */
888 if ((channel->socket_send_buffer_size > 0) &&
889 setsockopt(s, SOL_SOCKET, SO_SNDBUF,
890 (void *)&channel->socket_send_buffer_size,
891 sizeof(channel->socket_send_buffer_size)) == -1)
894 if ((channel->socket_receive_buffer_size > 0) &&
895 setsockopt(s, SOL_SOCKET, SO_RCVBUF,
896 (void *)&channel->socket_receive_buffer_size,
897 sizeof(channel->socket_receive_buffer_size)) == -1)
900 #ifdef SO_BINDTODEVICE
901 if (channel->local_dev_name[0]) {
902 if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE,
903 channel->local_dev_name, sizeof(channel->local_dev_name))) {
904 /* Only root can do this, and usually not fatal if it doesn't work, so */
905 /* just continue on. */
910 if (family == AF_INET) {
911 if (channel->local_ip4) {
912 memset(&local.sa4, 0, sizeof(local.sa4));
913 local.sa4.sin_family = AF_INET;
914 local.sa4.sin_addr.s_addr = htonl(channel->local_ip4);
915 if (bind(s, &local.sa, sizeof(local.sa4)) < 0)
919 else if (family == AF_INET6) {
920 if (memcmp(channel->local_ip6, &ares_in6addr_any, sizeof(channel->local_ip6)) != 0) {
921 memset(&local.sa6, 0, sizeof(local.sa6));
922 local.sa6.sin6_family = AF_INET6;
923 memcpy(&local.sa6.sin6_addr, channel->local_ip6, sizeof(channel->local_ip6));
924 if (bind(s, &local.sa, sizeof(local.sa6)) < 0)
932 static int open_tcp_socket(ares_channel channel, struct server_state *server)
936 ares_socklen_t salen;
938 struct sockaddr_in sa4;
939 struct sockaddr_in6 sa6;
943 switch (server->addr.family)
946 sa = (void *)&saddr.sa4;
947 salen = sizeof(saddr.sa4);
948 memset(sa, 0, salen);
949 saddr.sa4.sin_family = AF_INET;
950 saddr.sa4.sin_port = (unsigned short)(channel->tcp_port & 0xffff);
951 memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
952 sizeof(server->addr.addrV4));
955 sa = (void *)&saddr.sa6;
956 salen = sizeof(saddr.sa6);
957 memset(sa, 0, salen);
958 saddr.sa6.sin6_family = AF_INET6;
959 saddr.sa6.sin6_port = (unsigned short)(channel->tcp_port & 0xffff);
960 memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
961 sizeof(server->addr.addrV6));
967 /* Acquire a socket. */
968 s = socket(server->addr.family, SOCK_STREAM, 0);
969 if (s == ARES_SOCKET_BAD)
973 if (configure_socket(s, server->addr.family, channel) < 0)
981 * Disable the Nagle algorithm (only relevant for TCP sockets, and thus not
982 * in configure_socket). In general, in DNS lookups we're pretty much
983 * interested in firing off a single request and then waiting for a reply,
984 * so batching isn't very interesting.
987 if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
988 (void *)&opt, sizeof(opt)) == -1)
995 /* Connect to the server. */
996 if (connect(s, sa, salen) == -1)
1000 if (err != EINPROGRESS && err != EWOULDBLOCK)
1007 if (channel->sock_create_cb)
1009 int err = channel->sock_create_cb(s, SOCK_STREAM,
1010 channel->sock_create_cb_data);
1018 SOCK_STATE_CALLBACK(channel, s, 1, 0);
1019 server->tcp_buffer_pos = 0;
1020 server->tcp_socket = s;
1021 server->tcp_connection_generation = ++channel->tcp_connection_generation;
1025 static int open_udp_socket(ares_channel channel, struct server_state *server)
1028 ares_socklen_t salen;
1030 struct sockaddr_in sa4;
1031 struct sockaddr_in6 sa6;
1033 struct sockaddr *sa;
1035 switch (server->addr.family)
1038 sa = (void *)&saddr.sa4;
1039 salen = sizeof(saddr.sa4);
1040 memset(sa, 0, salen);
1041 saddr.sa4.sin_family = AF_INET;
1042 saddr.sa4.sin_port = (unsigned short)(channel->udp_port & 0xffff);
1043 memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
1044 sizeof(server->addr.addrV4));
1047 sa = (void *)&saddr.sa6;
1048 salen = sizeof(saddr.sa6);
1049 memset(sa, 0, salen);
1050 saddr.sa6.sin6_family = AF_INET6;
1051 saddr.sa6.sin6_port = (unsigned short)(channel->udp_port & 0xffff);
1052 memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
1053 sizeof(server->addr.addrV6));
1059 /* Acquire a socket. */
1060 s = socket(server->addr.family, SOCK_DGRAM, 0);
1061 if (s == ARES_SOCKET_BAD)
1064 /* Set the socket non-blocking. */
1065 if (configure_socket(s, server->addr.family, channel) < 0)
1071 /* Connect to the server. */
1072 if (connect(s, sa, salen) == -1)
1074 int err = SOCKERRNO;
1076 if (err != EINPROGRESS && err != EWOULDBLOCK)
1083 if (channel->sock_create_cb)
1085 int err = channel->sock_create_cb(s, SOCK_DGRAM,
1086 channel->sock_create_cb_data);
1094 SOCK_STATE_CALLBACK(channel, s, 1, 0);
1096 server->udp_socket = s;
1100 static int same_questions(const unsigned char *qbuf, int qlen,
1101 const unsigned char *abuf, int alen)
1104 const unsigned char *p;
1113 if (qlen < HFIXEDSZ || alen < HFIXEDSZ)
1116 /* Extract qdcount from the request and reply buffers and compare them. */
1117 q.qdcount = DNS_HEADER_QDCOUNT(qbuf);
1118 a.qdcount = DNS_HEADER_QDCOUNT(abuf);
1119 if (q.qdcount != a.qdcount)
1122 /* For each question in qbuf, find it in abuf. */
1123 q.p = qbuf + HFIXEDSZ;
1124 for (i = 0; i < q.qdcount; i++)
1126 /* Decode the question in the query. */
1127 if (ares_expand_name(q.p, qbuf, qlen, &q.name, &q.namelen)
1131 if (q.p + QFIXEDSZ > qbuf + qlen)
1136 q.type = DNS_QUESTION_TYPE(q.p);
1137 q.dnsclass = DNS_QUESTION_CLASS(q.p);
1140 /* Search for this question in the answer. */
1141 a.p = abuf + HFIXEDSZ;
1142 for (j = 0; j < a.qdcount; j++)
1144 /* Decode the question in the answer. */
1145 if (ares_expand_name(a.p, abuf, alen, &a.name, &a.namelen)
1152 if (a.p + QFIXEDSZ > abuf + alen)
1158 a.type = DNS_QUESTION_TYPE(a.p);
1159 a.dnsclass = DNS_QUESTION_CLASS(a.p);
1162 /* Compare the decoded questions. */
1163 if (strcasecmp(q.name, a.name) == 0 && q.type == a.type
1164 && q.dnsclass == a.dnsclass)
1179 static int same_address(struct sockaddr *sa, struct ares_addr *aa)
1184 if (sa->sa_family == aa->family)
1189 addr1 = &aa->addrV4;
1190 addr2 = &((struct sockaddr_in *)sa)->sin_addr;
1191 if (memcmp(addr1, addr2, sizeof(aa->addrV4)) == 0)
1192 return 1; /* match */
1195 addr1 = &aa->addrV6;
1196 addr2 = &((struct sockaddr_in6 *)sa)->sin6_addr;
1197 if (memcmp(addr1, addr2, sizeof(aa->addrV6)) == 0)
1198 return 1; /* match */
1204 return 0; /* different */
1207 static void end_query (ares_channel channel, struct query *query, int status,
1208 unsigned char *abuf, int alen)
1212 /* First we check to see if this query ended while one of our send
1213 * queues still has pointers to it.
1215 for (i = 0; i < channel->nservers; i++)
1217 struct server_state *server = &channel->servers[i];
1218 struct send_request *sendreq;
1219 for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
1220 if (sendreq->owner_query == query)
1222 sendreq->owner_query = NULL;
1223 assert(sendreq->data_storage == NULL);
1224 if (status == ARES_SUCCESS)
1226 /* We got a reply for this query, but this queued
1227 * sendreq points into this soon-to-be-gone query's
1228 * tcpbuf. Probably this means we timed out and queued
1229 * the query for retransmission, then received a
1230 * response before actually retransmitting. This is
1231 * perfectly fine, so we want to keep the connection
1232 * running smoothly if we can. But in the worst case
1233 * we may have sent only some prefix of the query,
1234 * with some suffix of the query left to send. Also,
1235 * the buffer may be queued on multiple queues. To
1236 * prevent dangling pointers to the query's tcpbuf and
1237 * handle these cases, we just give such sendreqs
1238 * their own copy of the query packet.
1240 sendreq->data_storage = malloc(sendreq->len);
1241 if (sendreq->data_storage != NULL)
1243 memcpy(sendreq->data_storage, sendreq->data, sendreq->len);
1244 sendreq->data = sendreq->data_storage;
1247 if ((status != ARES_SUCCESS) || (sendreq->data_storage == NULL))
1249 /* We encountered an error (probably a timeout,
1250 * suggesting the DNS server we're talking to is
1251 * probably unreachable, wedged, or severely
1252 * overloaded) or we couldn't copy the request, so
1253 * mark the connection as broken. When we get to
1254 * process_broken_connections() we'll close the
1255 * connection and try to re-send requests to another
1258 server->is_broken = 1;
1259 /* Just to be paranoid, zero out this sendreq... */
1260 sendreq->data = NULL;
1266 /* Invoke the callback */
1267 query->callback(query->arg, status, query->timeouts, abuf, alen);
1268 ares__free_query(query);
1270 /* Simple cleanup policy: if no queries are remaining, close all
1271 * network sockets unless STAYOPEN is set.
1273 if (!(channel->flags & ARES_FLAG_STAYOPEN) &&
1274 ares__is_list_empty(&(channel->all_queries)))
1276 for (i = 0; i < channel->nservers; i++)
1277 ares__close_sockets(channel, &channel->servers[i]);
1281 void ares__free_query(struct query *query)
1283 /* Remove the query from all the lists in which it is linked */
1284 ares__remove_from_list(&(query->queries_by_qid));
1285 ares__remove_from_list(&(query->queries_by_timeout));
1286 ares__remove_from_list(&(query->queries_to_server));
1287 ares__remove_from_list(&(query->all_queries));
1288 /* Zero out some important stuff, to help catch bugs */
1289 query->callback = NULL;
1291 /* Deallocate the memory associated with the query */
1292 free(query->tcpbuf);
1293 free(query->server_info);