1 /* Copyright (C) 1998-2002,2003,2004,2005,2006,2007
2 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Ulrich Drepper <drepper@cygnus.com>, 1998.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; if not, write to the Free
18 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
30 #include <sys/socket.h>
35 #include <not-cancel.h>
36 #include <nis/rpcsvc/nis.h>
38 #include "nscd-client.h"
/* Read exactly LEN bytes from FD into BUF, looping over short reads.
   Returns the (negative) error result of the failing read, or the total
   number of bytes actually consumed (LEN - N on exit).
   NOTE(review): the loop header and the declarations of RET and N are
   elided from this chunk — presumably N counts remaining bytes; confirm
   against the full source.  */
__readall (int fd, void *buf, size_t len)
/* Restart the read transparently if it is interrupted by a signal.  */
ret = TEMP_FAILURE_RETRY (__read (fd, buf, n));
/* Advance the destination pointer past the bytes just received.  */
buf = (char *) buf + ret;
/* Propagate a read error; otherwise report how much was read in total.  */
return ret < 0 ? ret : len - n;
/* Gathered read: fill all IOVCNT buffers described by IOV from FD,
   retrying short reads.  Because readv may return after filling only
   part of the vector, the remaining segments are tracked in a local
   copy of IOV that is advanced past fully-consumed entries.
   NOTE(review): several interior lines (locals TOTAL and R, the outer
   retry loop, the success/error returns) are elided from this chunk.  */
__readvall (int fd, const struct iovec *iov, int iovcnt)
/* First attempt: read into the caller's vector as-is.  */
ssize_t ret = TEMP_FAILURE_RETRY (__readv (fd, iov, iovcnt));
/* Compute the total number of bytes the full vector describes.  */
for (int i = 0; i < iovcnt; ++i)
  total += iov[i].iov_len;
/* IOV is const; work on a stack copy we are allowed to modify.
   (VLA sized by IOVCNT — bounded by the callers in this file.)  */
struct iovec iov_buf[iovcnt];
struct iovec *iovp = memcpy (iov_buf, iov, iovcnt * sizeof (*iov));
/* Skip over every segment that the last read filled completely,
   R being the byte count still to account for.  */
while (iovp->iov_len <= r)
/* Partial segment: bump its base past the R bytes already stored.  */
iovp->iov_base = (char *) iovp->iov_base + r;
/* Retry with the adjusted vector until everything has arrived.  */
r = TEMP_FAILURE_RETRY (__readv (fd, iovp, iovcnt));
/* Open a non-blocking connection to the nscd daemon's UNIX-domain
   socket and transmit a request of TYPE with the given KEY/KEYLEN.
   Returns the connected socket descriptor, or -1 on failure (the
   descriptor is closed on the error path at the bottom).
   NOTE(review): the declarations of REQDATA and NOW, the surrounding
   retry loop, and several error-path statements are elided from this
   chunk.  */
open_socket (request_type type, const char *key, size_t keylen)
int sock = __socket (PF_UNIX, SOCK_STREAM, 0);
/* Make socket non-blocking so connect/send cannot hang the caller.  */
__fcntl (sock, F_SETFL, O_RDWR | O_NONBLOCK);
struct sockaddr_un sun;
sun.sun_family = AF_UNIX;
/* _PATH_NSCDSOCKET is a fixed compile-time path, so strcpy is safe
   here with respect to sun_path's size.  */
strcpy (sun.sun_path, _PATH_NSCDSOCKET);
/* EINPROGRESS is expected for a non-blocking connect; any other
   failure aborts.  */
if (__connect (sock, (struct sockaddr *) &sun, sizeof (sun)) < 0
    && errno != EINPROGRESS)
/* Fill in the fixed request header followed by the lookup key.  */
reqdata.req.version = NSCD_VERSION;
reqdata.req.type = type;
reqdata.req.key_len = keylen;
memcpy (reqdata.key, key, keylen);
bool first_try = true;
struct timeval tvend;
/* Platforms without MSG_NOSIGNAL simply get no flag.  */
# define MSG_NOSIGNAL 0
ssize_t wres = TEMP_FAILURE_RETRY (__send (sock, &reqdata,
if (__builtin_expect (wres == (ssize_t) sizeof (reqdata), 1))
  /* We managed to send the request.  */
/* Anything other than a would-block condition is fatal.  */
if (wres != -1 || errno != EAGAIN)
  /* Something is really wrong, no chance to continue.  */
/* The daemon is busy; wait for it.  */
gettimeofday (&tvend, NULL);
/* Recompute the remaining poll timeout in milliseconds from the
   absolute deadline TVEND.  */
gettimeofday (&now, NULL);
to = ((tvend.tv_sec - now.tv_sec) * 1000
      + (tvend.tv_usec - now.tv_usec) / 1000);
struct pollfd fds[1];
fds[0].events = POLLOUT | POLLERR | POLLHUP;
if (__poll (fds, 1, to) <= 0)
  /* The connection timed out or broke down.  */
/* We try to write again.  */
/* Error path: release the descriptor without acting as a thread
   cancellation point.  */
close_not_cancel_no_status (sock);
/* Release a mapped nscd database once its reference count has dropped
   to zero: unmap the shared region.
   NOTE(review): the free of MAPPED itself is presumably on an elided
   line — confirm against the full source.  */
__nscd_unmap (struct mapped_database *mapped)
/* Caller must have dropped the last reference before unmapping.  */
assert (mapped->counter == 0);
__munmap ((void *) mapped->head, mapped->mapsize);
/* Wait up to five seconds for SOCK to become readable (or error/hup).
   Returns the result of the final poll call: >0 ready, 0 timeout,
   -1 error.  EINTR is handled manually against an absolute deadline
   instead of via TEMP_FAILURE_RETRY, so a steady stream of signals
   cannot extend the wait indefinitely.
   NOTE(review): the declaration of NOW and the inner retry loop
   header are elided from this chunk.  */
wait_on_socket (int sock)
struct pollfd fds[1];
fds[0].events = POLLIN | POLLERR | POLLHUP;
int n = __poll (fds, 1, 5 * 1000);
if (n == -1 && __builtin_expect (errno == EINTR, 0))
/* Handle the case where the poll() call is interrupted by a
   signal.  We cannot just use TEMP_FAILURE_RETRY since it might
   lead to infinite loops.  */
(void) __gettimeofday (&now, NULL);
/* Absolute deadline in milliseconds, rounding the microseconds.  */
long int end = (now.tv_sec + 5) * 1000 + (now.tv_usec + 500) / 1000;
/* Remaining time until the deadline for the next poll attempt.  */
long int timeout = end - (now.tv_sec * 1000
			  + (now.tv_usec + 500) / 1000);
n = __poll (fds, 1, timeout);
/* Leave the retry loop on success or on any error except EINTR.  */
if (n != -1 || errno != EINTR)
(void) __gettimeofday (&now, NULL);
/* Try to get a file descriptor for the shared memory segment
   containing the database.  Asks the nscd daemon for TYPE/KEY, receives
   the descriptor via SCM_RIGHTS ancillary data, validates the file's
   persistent-database header, mmaps it read-only, and publishes the new
   mapping through *MAPPEDP (dropping the reference on the old one).
   Returns the new mapped_database record or NO_MAPPING on any failure.
   NOTE(review): numerous interior lines (locals IOV, MAPFD, ST, the
   union BUF, several error-path jumps, and the #if for SCM_RIGHTS) are
   elided from this chunk.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
struct mapped_database *result = NO_MAPPING;
/* Include the terminating NUL in the key length sent to the daemon.  */
const size_t keylen = strlen (key) + 1;
/* Preserve caller-visible errno across all the syscalls below.  */
int saved_errno = errno;
char resdata[keylen];
/* Open a socket and send the request.  */
int sock = open_socket (type, key, keylen);
/* Room for the data sent along with the file descriptor.  We expect
   the key name back.  */
iov[0].iov_base = resdata;
iov[0].iov_len = keylen;
/* Control buffer large enough for one file descriptor.  */
char bytes[CMSG_SPACE (sizeof (int))];
struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
		      .msg_control = buf.bytes,
		      .msg_controllen = sizeof (buf) };
struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN (sizeof (int));
/* This access is well-aligned since BUF is correctly aligned for an
   int and CMSG_DATA preserves this alignment.  */
*(int *) CMSG_DATA (cmsg) = -1;
msg.msg_controllen = cmsg->cmsg_len;
/* Do not block forever if the daemon never answers.  */
if (wait_on_socket (sock) <= 0)
if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg, 0))
/* Extract the descriptor passed in the ancillary data.  */
mapfd = *(int *) CMSG_DATA (cmsg);
/* Reject a malformed control message that does not carry exactly
   one int-sized payload.  */
if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
		      != CMSG_LEN (sizeof (int)), 0))
/* Sanity checks: the daemon must echo our key, the descriptor must
   be stat-able, and the file must at least hold the header.  */
if (__builtin_expect (strcmp (resdata, key) != 0, 0)
    || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
    || __builtin_expect (st.st_size < sizeof (struct database_pers_head), 0))
struct database_pers_head head;
if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
		     != sizeof (head), 0))
/* Verify the on-disk format and that the daemon is still alive (or
   the mapping is at least recent enough to trust).  */
if (__builtin_expect (head.version != DB_VERSION, 0)
    || __builtin_expect (head.header_size != sizeof (head), 0)
    /* This really should not happen but who knows, maybe the update
    || __builtin_expect (! head.nscd_certainly_running
			 && head.timestamp + MAPPING_TIMEOUT < time (NULL),
/* Total size: header + hash array (rounded to ALIGN) + data area.  */
size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
/* Refuse a file shorter than its header claims the database is.  */
if (__builtin_expect (st.st_size < size, 0))
/* The file is large enough, map it now.  */
void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
if (__builtin_expect (mapping != MAP_FAILED, 1))
/* Allocate a record for the mapping.  */
struct mapped_database *newp = malloc (sizeof (*newp));
/* Ugh, after all we went through the memory allocation failed.  */
__munmap (mapping, size);
/* Fill in the record: data area starts after the header and the
   ALIGN-rounded hash array.  */
newp->head = mapping;
newp->data = ((char *) mapping + head.header_size
	      + roundup (head.module * sizeof (ref_t), ALIGN));
newp->mapsize = size;
newp->datasize = head.data_size;
/* Set counter to 1 to show it is usable.  */
__set_errno (saved_errno);
#endif	/* SCM_RIGHTS */
/* Swap the published mapping; drop our reference to the old one and
   tear it down if we held the last reference.  */
struct mapped_database *oldval = *mappedp;
if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
  __nscd_unmap (oldval);
/* Return a referenced-counted handle on the current mapping for the
   database identified by TYPE/NAME, refreshing it via get_mapping when
   it is absent, stale, or has grown.  *GC_CYCLEP receives the mapping's
   garbage-collection cycle counter so the caller can detect concurrent
   GC.  Returns NO_MAPPING when no usable mapping exists.
   NOTE(review): the lock-release code, the CNT declaration, and the
   bodies of several conditionals are elided from this chunk.  */
struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    volatile struct locked_map_ptr *mapptr, int *gc_cyclep)
struct mapped_database *cur = mapptr->mapped;
/* Fast path: a previous attempt already determined there is no
   database to map.  */
if (cur == NO_MAPPING)
/* Spin on the map pointer's lock; give up after a few rounds rather
   than blocking the lookup indefinitely.  */
while (__builtin_expect (atomic_compare_and_exchange_val_acq (&mapptr->lock,
// XXX Best number of rounds?
if (__builtin_expect (++cnt > 5, 0))
/* Re-read under the lock — another thread may have updated it.  */
cur = mapptr->mapped;
if (__builtin_expect (cur != NO_MAPPING, 1))
/* If not mapped or timestamp not updated, request new map.  */
    || (cur->head->nscd_certainly_running == 0
	&& cur->head->timestamp + MAPPING_TIMEOUT < time (NULL))
    || cur->head->data_size > cur->datasize)
  cur = get_mapping (type, name,
		     (struct mapped_database **) &mapptr->mapped);
if (__builtin_expect (cur != NO_MAPPING, 1))
/* An odd GC cycle count means collection is in progress; the caller
   must treat the data as suspect.  */
if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
/* Hand out a reference to the caller.  */
atomic_increment (&cur->counter);
/* Don't return const struct datahead *, as even though the record
   is normally constant, it can change arbitrarily during nscd
   garbage collection.  */
/* Look KEY/KEYLEN of request TYPE up in the hash table of MAPPED.
   Walks the collision chain starting at the key's bucket, bounds-
   checking every offset against the mapped data size because the
   daemon may be rewriting the table concurrently.
   NOTE(review): the chain-advance statement, the trailer/return
   lines, and the bodies of the alignment checks are elided from
   this chunk.  */
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
/* Select the bucket for this key.  */
unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;
size_t datasize = mapped->datasize;
ref_t work = mapped->head->array[hash];
/* Follow the chain; every entry offset must leave room for a full
   hashentry inside the mapping.  */
while (work != ENDREF && work + sizeof (struct hashentry) <= datasize)
struct hashentry *here = (struct hashentry *) (mapped->data + work);
#ifndef _STRING_ARCH_unaligned
/* Although during garbage collection when moving struct hashentry
   records around we first copy from old to new location and then
   adjust pointer from previous hashentry to it, there is no barrier
   between those memory writes.  It is very unlikely to hit it,
   so check alignment only if a misaligned load can crash the
/* Skip entries whose address is not suitably aligned for HERE.  */
if ((uintptr_t) here & (__alignof__ (*here) - 1))
/* Match on request type, key length, and key bytes; all offsets are
   validated against DATASIZE before being dereferenced.  */
if (type == here->type
    && keylen == here->len
    && here->key + keylen <= datasize
    && memcmp (key, mapped->data + here->key, keylen) == 0
    && here->packet + sizeof (struct datahead) <= datasize)
/* We found the entry.  Increment the appropriate counter.  */
= (struct datahead *) (mapped->data + here->packet);
#ifndef _STRING_ARCH_unaligned
/* Same misalignment hazard applies to the datahead record.  */
if ((uintptr_t) dh & (__alignof__ (*dh) - 1))
/* See whether we must ignore the entry or whether something
   is wrong because garbage collection is in progress.  */
if (dh->usable && here->packet + dh->allocsize <= datasize)
/* Create a socket connected to a name.  */
/* Send a TYPE request for KEY/KEYLEN to the nscd daemon and read a
   fixed-size reply of RESPONSELEN bytes into RESPONSE.  Returns the
   still-open socket on success; on any failure the socket is closed
   and errno is restored to its value on entry.
   NOTE(review): the MAXKEYLEN check's body, the success return, and
   the final failure return are elided from this chunk.  */
__nscd_open_socket (const char *key, size_t keylen, request_type type,
		    void *response, size_t responselen)
/* This should never happen and it is something the nscd daemon
   enforces, too.  Here it helps to limit the amount of stack
/* Reject oversized keys outright.  */
if (keylen > MAXKEYLEN)
/* Preserve caller-visible errno across the socket traffic.  */
int saved_errno = errno;
int sock = open_socket (type, key, keylen);
/* Only read once the daemon signals the reply is ready.  */
if (wait_on_socket (sock) > 0)
ssize_t nbytes = TEMP_FAILURE_RETRY (__read (sock, response,
/* Success only on a complete reply of the expected size.  */
if (nbytes == (ssize_t) responselen)
/* Failure path: close without acting as a cancellation point.  */
close_not_cancel_no_status (sock);
__set_errno (saved_errno);