3 * Copyright (c) 2009, Sun Microsystems, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * - Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * - Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * - Neither the name of Sun Microsystems, Inc. nor the names of its
14 * contributors may be used to endorse or promote products derived
15 * from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
21 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
31 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
34 #include <sys/cdefs.h>
37 * svc_dg.c, Server side for connectionless RPC.
39 * Does some caching in the hopes of achieving execute-at-most-once semantics.
42 #include <reentrant.h>
43 #include <sys/types.h>
44 #include <sys/socket.h>
46 #include <rpc/svc_dg.h>
52 #include <netconfig.h>
/*
 * NOTE(review): this file is a numbered listing with gaps in the embedded
 * line numbers, i.e. many original source lines are missing. Code is left
 * byte-identical throughout; only comments are added.
 */
/* Per-transport datagram private data, hung off the xprt's xp_p2 slot. */
/* NOTE(review): the xprt argument is not parenthesized here, unlike in
 * rpc_buffer below -- confirm no caller passes a compound expression. */
58 #define su_data(xprt) ((struct svc_dg_data *)(xprt->xp_p2))
/* The transport's marshalling/IO buffer, stored in the xp_p1 slot. */
59 #define rpc_buffer(xprt) ((xprt)->xp_p1)
/* Classic MAX macro: evaluates each argument twice; only used below on
 * plain sendsize/recvsize values, where double evaluation is harmless. */
62 #define MAX(a, b) (((a) > (b)) ? (a) : (b))
/*
 * Forward declarations for the datagram transport's ops vector entries
 * (installed by svc_dg_ops below) and for the duplicate-request cache
 * helpers used by svc_dg_recv / svc_dg_reply.
 */
65 static void svc_dg_ops(SVCXPRT *);
66 static enum xprt_stat svc_dg_stat(SVCXPRT *);
67 static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
68 static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
69 static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, void *);
70 static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, void *);
71 static void svc_dg_destroy(SVCXPRT *);
72 static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
73 static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
74 static void cache_set(SVCXPRT *, size_t);
/* Public entry point: enables the duplicate-request cache (defined below). */
75 int svc_dg_enablecache(SVCXPRT *, u_int);
/* PKTINFO helpers so replies leave from the address the request arrived on. */
76 static void svc_dg_enable_pktinfo(int, const struct __rpc_sockinfo *);
77 static int svc_dg_valid_pktinfo(struct msghdr *);
81 * xprt = svc_dg_create(sock, sendsize, recvsize);
82 * Does other connectionless specific initializations.
83 * Once *xprt is initialized, it is registered.
84 * see (svc.h, xprt_register). If recvsize or sendsize are 0 suitable
85 * system defaults are chosen.
86 * The routine returns NULL if a problem occurred.
/* Diagnostic format and message strings shared by svc_dg_create below. */
/* NOTE(review): svc_dg_err2 begins with a space (doubled when printed via
 * "svc_dg_create: %s"), and __no_mem_str uses a leading-double-underscore
 * name reserved to the implementation -- both inherited from the original
 * Sun code; left untouched here since strings are runtime behavior. */
88 static const char svc_dg_str[] = "svc_dg_create: %s";
89 static const char svc_dg_err1[] = "could not get transport information";
90 static const char svc_dg_err2[] = " transport does not support data transfer";
91 static const char __no_mem_str[] = "out of memory";
/*
 * svc_dg_create - create a connectionless (datagram) RPC transport over fd.
 * Per the header comment above: sendsize/recvsize of 0 select system
 * defaults, and NULL is returned if a problem occurred.
 * NOTE(review): the embedded numbering jumps (94->100, 115->119, 146->151,
 * ...), so the K&R parameter declarations, the error-path gotos/returns
 * and the closing braces are missing from this listing.
 */
94 svc_dg_create(fd, sendsize, recvsize)
100 struct svc_dg_data *su = NULL;
101 struct __rpc_sockinfo si;
102 struct sockaddr_storage ss;
/* Query address family / protocol information for the descriptor. */
105 if (!__rpc_fd2sockinfo(fd, &si)) {
106 warnx(svc_dg_str, svc_dg_err1);
110 * Find the receive and the send size
/* Let the library choose/clamp the transfer sizes for this transport;
 * zero from either call means the transport cannot move data. */
112 sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
113 recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
114 if ((sendsize == 0) || (recvsize == 0)) {
115 warnx(svc_dg_str, svc_dg_err2);
119 xprt = mem_alloc(sizeof (SVCXPRT));
122 memset(xprt, 0, sizeof (SVCXPRT));
124 su = mem_alloc(sizeof (*su));
/* One shared I/O buffer sized to the larger direction, rounded up to a
 * multiple of 4 (XDR units are 4 bytes). */
127 su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
128 if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
/* XDR memory stream over the shared buffer (the op argument and the
 * remainder of this call are lost in this listing). */
130 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
135 xprt->xp_auth = NULL;
136 xprt->xp_verf.oa_base = su->su_verfbody;
138 xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
/* Record the socket's local (bound) address in xp_ltaddr. */
141 if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
143 __rpc_set_netbuf(&xprt->xp_ltaddr, &ss, slen);
145 /* Enable reception of IP*_PKTINFO control msgs */
146 svc_dg_enable_pktinfo(fd, &si);
/* Out-of-memory error path: report, then free partial allocations. */
151 (void) warnx(svc_dg_str, __no_mem_str);
154 (void) mem_free(su, sizeof (*su));
155 (void) mem_free(xprt, sizeof (SVCXPRT));
/* NOTE(review): the next line (orig. 161) is the return-type line of
 * svc_dg_stat; its parameter list and entire body (orig. 162-168) are
 * missing from this listing. */
161 static enum xprt_stat
/*
 * svc_dg_recv - receive one datagram request into the transport buffer
 * and decode its call header into *msg.
 * Also captures the peer address (xp_rtaddr) and any PKTINFO control
 * message so svc_dg_reply can answer from the same local address, and
 * short-circuits retransmissions via the duplicate-request cache.
 * NOTE(review): local declarations for iov/rlen/reply/replylen and the
 * return statements are among the lines missing here.
 */
169 svc_dg_recv(xprt, msg)
173 struct svc_dg_data *su = su_data(xprt);
174 XDR *xdrs = &(su->su_xdrs);
176 struct sockaddr_storage ss;
177 struct msghdr *mesgp;
/* Point the scatter/gather list at the transport's single I/O buffer. */
183 iov.iov_base = rpc_buffer(xprt);
184 iov.iov_len = su->su_iosz;
/* su_msghdr persists on the transport so svc_dg_reply can reuse it. */
185 mesgp = &su->su_msghdr;
186 memset(mesgp, 0, sizeof(*mesgp));
187 mesgp->msg_iov = &iov;
188 mesgp->msg_iovlen = 1;
189 mesgp->msg_name = (struct sockaddr *)(void *) &ss;
190 mesgp->msg_namelen = sizeof (struct sockaddr_storage);
/* Receive ancillary data (PKTINFO) into the per-transport cmsg buffer. */
191 mesgp->msg_control = su->su_cmsg;
192 mesgp->msg_controllen = sizeof(su->su_cmsg);
194 rlen = recvmsg(xprt->xp_fd, mesgp, 0);
/* Interrupted receives are presumably retried; the branch target
 * (orig. 196) is missing from this listing. */
195 if (rlen == -1 && errno == EINTR)
/* Reject errors and runts shorter than the minimal RPC call header
 * (xid, direction, rpcvers, prog = 4 words). */
197 if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
199 __rpc_set_netbuf(&xprt->xp_rtaddr, &ss, mesgp->msg_namelen);
201 /* Check whether there's an IP_PKTINFO or IP6_PKTINFO control message.
202 * If yes, preserve it for svc_dg_reply; otherwise just zap any cmsgs */
203 if (!svc_dg_valid_pktinfo(mesgp)) {
204 mesgp->msg_control = NULL;
205 mesgp->msg_controllen = 0;
208 __xprt_set_raddr(xprt, &ss);
/* Decode the call message in place from the receive buffer. */
209 xdrs->x_op = XDR_DECODE;
211 if (! xdr_callmsg(xdrs, msg)) {
214 su->su_xid = msg->rm_xid;
/* Duplicate-request cache hit: resend the cached reply bytes directly
 * instead of re-dispatching the call. */
215 if (su->su_cache != NULL) {
216 if (cache_get(xprt, msg, &reply, &replylen)) {
217 iov.iov_base = reply;
218 iov.iov_len = replylen;
219 (void) sendmsg(xprt->xp_fd, mesgp, 0);
/*
 * svc_dg_reply - encode *msg (and, for successful calls, the wrapped
 * results) into the transport buffer and send it to the peer recorded
 * in xp_rtaddr, then记 record the reply in the duplicate-request cache.
 * NOTE(review): declarations of has_args/slen/iov and the return
 * statements are among the lines missing from this listing.
 */
227 svc_dg_reply(xprt, msg)
231 struct svc_dg_data *su = su_data(xprt);
232 XDR *xdrs = &(su->su_xdrs);
236 xdrproc_t xdr_results;
237 caddr_t xdr_location;
/* For an accepted, successful call, detach the caller's results proc so
 * xdr_replymsg encodes only the header; results are wrapped separately
 * through SVCAUTH_WRAP below. */
240 if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
241 msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
243 xdr_results = msg->acpted_rply.ar_results.proc;
244 xdr_location = msg->acpted_rply.ar_results.where;
246 msg->acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
247 msg->acpted_rply.ar_results.where = NULL;
/* Re-aim the XDR stream at the buffer for encoding the reply. */
251 xdrs->x_op = XDR_ENCODE;
/* Echo back the xid captured by svc_dg_recv. */
253 msg->rm_xid = su->su_xid;
254 if (xdr_replymsg(xdrs, msg) &&
255 (!has_args || (xprt->xp_auth &&
256 SVCAUTH_WRAP(xprt->xp_auth, xdrs, xdr_results, xdr_location)))) {
/* NOTE(review): this local msghdr deliberately(?) shadows the rpc_msg
 * parameter `msg` -- inherited from the original code; confirm intent. */
257 struct msghdr *msg = &su->su_msghdr;
260 iov.iov_base = rpc_buffer(xprt);
261 iov.iov_len = slen = XDR_GETPOS(xdrs);
/* Address the datagram to the peer that sent the request. */
264 msg->msg_name = (struct sockaddr *)(void *) xprt->xp_rtaddr.buf;
265 msg->msg_namelen = xprt->xp_rtaddr.len;
266 /* cmsg already set in svc_dg_recv */
/* Success only if the whole encoded reply was sent; then cache it. */
268 if (sendmsg(xprt->xp_fd, msg, 0) == (ssize_t) slen) {
271 cache_set(xprt, slen);
/*
 * svc_dg_getargs - decode the call arguments from the request buffer,
 * unwrapping them through the transport's auth handle.
 * NOTE(review): the parameter declarations, the body of the failure
 * branch and the return statements are missing from this listing.
 */
278 svc_dg_getargs(xprt, xdr_args, args_ptr)
283 if (! SVCAUTH_UNWRAP(xprt->xp_auth, &(su_data(xprt)->su_xdrs),
284 xdr_args, args_ptr)) {
/*
 * svc_dg_freeargs - release memory that xdr_args allocated while
 * decoding, by running the same routine in XDR_FREE mode.
 * NOTE(review): parameter declarations (orig. 292-295) are missing
 * from this listing.
 */
291 svc_dg_freeargs(xprt, xdr_args, args_ptr)
296 XDR *xdrs = &(su_data(xprt)->su_xdrs);
298 xdrs->x_op = XDR_FREE;
299 return (*xdr_args)(xdrs, args_ptr);
/*
 * svc_dg_destroy - unregister and tear down a datagram transport:
 * close the socket, destroy the auth handle and XDR stream, and free
 * the I/O buffer, private data, address netbufs and the xprt itself.
 * NOTE(review): the function's signature line (orig. ~302-305) is
 * missing from this listing; only the body remains.
 */
306 struct svc_dg_data *su = su_data(xprt);
308 xprt_unregister(xprt);
309 if (xprt->xp_fd != -1)
310 (void)close(xprt->xp_fd);
311 if (xprt->xp_auth != NULL) {
312 SVCAUTH_DESTROY(xprt->xp_auth);
313 xprt->xp_auth = NULL;
/* Free in dependency order: stream, buffer, private data, addresses. */
315 XDR_DESTROY(&(su->su_xdrs));
316 (void) mem_free(rpc_buffer(xprt), su->su_iosz);
317 (void) mem_free(su, sizeof (*su));
318 if (xprt->xp_rtaddr.buf)
319 (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
320 if (xprt->xp_ltaddr.buf)
321 (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
/* free(NULL) is a no-op, so xp_tp needs no guard. */
323 (void) free(xprt->xp_tp);
324 (void) mem_free(xprt, sizeof (SVCXPRT));
/* NOTE(review): only svc_dg_control's K&R signature line survives in
 * this listing; its body (orig. 330-338) is missing. */
329 svc_dg_control(xprt, rq, in)
/*
 * svc_dg_ops - install the (lazily initialized, process-wide) ops
 * vectors on the transport. The static `ops`/`ops2` tables are filled
 * in exactly once, guarded by ops_lock; xp_recv == NULL doubles as the
 * "not yet initialized" flag.
 * NOTE(review): the function's own signature line and the xp_ops
 * assignment are among the lines missing from this listing.
 */
341 static struct xp_ops ops;
342 static struct xp_ops2 ops2;
343 extern mutex_t ops_lock;
345 /* VARIABLES PROTECTED BY ops_lock: ops */
347 mutex_lock(&ops_lock);
348 if (ops.xp_recv == NULL) {
349 ops.xp_recv = svc_dg_recv;
350 ops.xp_stat = svc_dg_stat;
351 ops.xp_getargs = svc_dg_getargs;
352 ops.xp_reply = svc_dg_reply;
353 ops.xp_freeargs = svc_dg_freeargs;
354 ops.xp_destroy = svc_dg_destroy;
355 ops2.xp_control = svc_dg_control;
358 xprt->xp_ops2 = &ops2;
359 mutex_unlock(&ops_lock);
362 /* The CACHING COMPONENT */
365 * Could have been a separate file, but some part of it depends upon the
366 * private structure of the client handle.
368 * Fifo cache for cl server
369 * Copies pointers to reply buffers into fifo cache
370 * Buffers are sent again if retransmissions are detected.
/* Hash table is SPARSENESS times larger than the requested cache size. */
373 #define SPARSENESS 4 /* 75% sparse */
/* Typed wrappers over mem_alloc/memset/mem_free for cache bookkeeping. */
375 #define ALLOC(type, size) \
376 (type *) mem_alloc((sizeof (type) * (size)))
378 #define MEMZERO(addr, type, size) \
379 (void) memset((void *) (addr), 0, sizeof (type) * (int) (size))
381 #define FREE(addr, type, size) \
382 mem_free((addr), (sizeof (type) * (size)))
385 * An entry in the cache
387 typedef struct cache_node *cache_ptr;
/* NOTE(review): the opening lines of struct cache_node (incl. the
 * cache_xid and cache_reply members referenced later) are missing from
 * this listing; only part of the member list survives. */
390 * Index into cache is xid, proc, vers, prog and address
393 rpcproc_t cache_proc;
394 rpcvers_t cache_vers;
395 rpcprog_t cache_prog;
396 struct netbuf cache_addr;
398 * The cached reply and length
401 size_t cache_replylen;
403 * Next node on the list, if there is a collision
405 cache_ptr cache_next;
/* NOTE(review): the opening of struct cl_cache (the per-transport cache
 * control block) is likewise missing; members below are hash table,
 * FIFO eviction ring, and the RPC triple saved by cache_get for the
 * later cache_set. */
412 u_int uc_size; /* size of cache */
413 cache_ptr *uc_entries; /* hash table of entries in cache */
414 cache_ptr *uc_fifo; /* fifo list of entries in cache */
415 u_int uc_nextvictim; /* points to next victim in fifo list */
416 rpcprog_t uc_prog; /* saved program number */
417 rpcvers_t uc_vers; /* saved version number */
418 rpcproc_t uc_proc; /* saved procedure number */
423 * the hashing function
/* NOTE(review): the xid argument is not parenthesized in the expansion;
 * callers below only pass simple values, but a compound expression
 * would mis-associate with the % operator. */
425 #define CACHE_LOC(transp, xid) \
426 (xid % (SPARSENESS * ((struct cl_cache *) \
427 su_data(transp)->su_cache)->uc_size))
/* Single global lock serializing all duplicate-request cache access. */
429 extern mutex_t dupreq_lock;
432 * Enable use of the cache. Returns 1 on success, 0 on failure.
433 * Note: there is no disable.
435 static const char cache_enable_str[] = "svc_enablecache: %s %s";
436 static const char alloc_err[] = "could not allocate cache ";
437 static const char enable_err[] = "cache already enabled";
/*
 * svc_dg_enablecache - allocate and attach a duplicate-request cache of
 * `size` entries to the transport. Per the comment above: returns 1 on
 * success, 0 on failure; there is no disable.
 * NOTE(review): the parameter declarations, the uc declaration, the
 * uc_size assignment and the return statements are missing from this
 * listing; `size * SPARSENESS` is multiplied without an overflow check
 * -- presumably callers pass small sizes, but confirm.
 */
440 svc_dg_enablecache(transp, size)
444 struct svc_dg_data *su = su_data(transp);
447 mutex_lock(&dupreq_lock);
/* Refuse to enable twice. */
448 if (su->su_cache != NULL) {
449 (void) warnx(cache_enable_str, enable_err, " ");
450 mutex_unlock(&dupreq_lock);
453 uc = ALLOC(struct cl_cache, 1);
455 warnx(cache_enable_str, alloc_err, " ");
456 mutex_unlock(&dupreq_lock);
460 uc->uc_nextvictim = 0;
/* Sparse hash table: SPARSENESS buckets per cache entry. */
461 uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
462 if (uc->uc_entries == NULL) {
463 warnx(cache_enable_str, alloc_err, "data");
464 FREE(uc, struct cl_cache, 1);
465 mutex_unlock(&dupreq_lock);
468 MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
/* FIFO ring used to pick eviction victims. */
469 uc->uc_fifo = ALLOC(cache_ptr, size);
470 if (uc->uc_fifo == NULL) {
471 warnx(cache_enable_str, alloc_err, "fifo");
472 FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
473 FREE(uc, struct cl_cache, 1);
474 mutex_unlock(&dupreq_lock);
477 MEMZERO(uc->uc_fifo, cache_ptr, size);
478 su->su_cache = (char *)(void *)uc;
479 mutex_unlock(&dupreq_lock);
484 * Set an entry in the cache. It assumes that the uc entry is set from
485 * the earlier call to cache_get() for the same procedure. This will always
486 * happen because cache_get() is called by svc_dg_recv and cache_set() is called
487 * by svc_dg_reply(). All this hoopla because the right RPC parameters are
488 * not available at svc_dg_reply time.
/* Diagnostic strings for cache_set's failure paths. */
491 static const char cache_set_str[] = "cache_set: %s";
492 static const char cache_set_err1[] = "victim not found";
493 static const char cache_set_err2[] = "victim alloc failed";
494 static const char cache_set_err3[] = "could not allocate new rpc buffer";
/*
 * cache_set - store the just-sent reply (currently in rpc_buffer(xprt),
 * replylen bytes) in the duplicate-request cache, keyed by the xid and
 * the prog/vers/proc triple saved earlier by cache_get.
 * The reply buffer itself is donated to the cache entry and replaced on
 * the transport by a recycled or fresh buffer.
 * NOTE(review): local declarations (victim, vicp, loc, newbuf, uaddr),
 * several closing braces and the return statements are missing from
 * this listing.
 */
497 cache_set(xprt, replylen)
503 struct svc_dg_data *su = su_data(xprt);
504 struct cl_cache *uc = (struct cl_cache *) su->su_cache;
507 struct netconfig *nconf;
510 mutex_lock(&dupreq_lock);
512 * Find space for the new entry, either by
513 * reusing an old entry, or by mallocing a new one
515 victim = uc->uc_fifo[uc->uc_nextvictim];
516 if (victim != NULL) {
/* Unlink the victim from its hash chain and recycle its reply buffer. */
517 loc = CACHE_LOC(xprt, victim->cache_xid);
518 for (vicp = &uc->uc_entries[loc];
519 *vicp != NULL && *vicp != victim;
520 vicp = &(*vicp)->cache_next)
523 warnx(cache_set_str, cache_set_err1);
524 mutex_unlock(&dupreq_lock);
527 *vicp = victim->cache_next; /* remove from cache */
528 newbuf = victim->cache_reply;
/* No victim yet (cache not full): allocate a fresh node and buffer. */
530 victim = ALLOC(struct cache_node, 1);
531 if (victim == NULL) {
532 warnx(cache_set_str, cache_set_err2);
533 mutex_unlock(&dupreq_lock);
536 newbuf = mem_alloc(su->su_iosz);
537 if (newbuf == NULL) {
538 warnx(cache_set_str, cache_set_err3);
539 FREE(victim, struct cache_node, 1);
540 mutex_unlock(&dupreq_lock);
/* Debug trace of what is being cached (uaddr freeing not visible here). */
548 if (libtirpc_debug_level > 3) {
549 if ((nconf = getnetconfigent(xprt->xp_netid))) {
550 uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
551 freenetconfigent(nconf);
553 ("cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
554 su->su_xid, uc->uc_prog, uc->uc_vers,
555 uc->uc_proc, uaddr));
/* Donate the current reply buffer to the entry; give the transport the
 * recycled/new buffer and rebuild its XDR stream over it. */
559 victim->cache_replylen = replylen;
560 victim->cache_reply = rpc_buffer(xprt);
561 rpc_buffer(xprt) = newbuf;
562 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
563 su->su_iosz, XDR_ENCODE);
/* Key the entry by xid plus the triple saved in cache_get. */
564 victim->cache_xid = su->su_xid;
565 victim->cache_proc = uc->uc_proc;
566 victim->cache_vers = uc->uc_vers;
567 victim->cache_prog = uc->uc_prog;
568 victim->cache_addr = xprt->xp_rtaddr;
/* NOTE(review): this ALLOC result is passed straight to memcpy with no
 * NULL check -- a failed allocation here would dereference NULL. */
569 victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
570 (void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
571 (size_t)xprt->xp_rtaddr.len);
/* Insert at the head of the hash chain and advance the FIFO ring. */
572 loc = CACHE_LOC(xprt, victim->cache_xid);
573 victim->cache_next = uc->uc_entries[loc];
574 uc->uc_entries[loc] = victim;
575 uc->uc_fifo[uc->uc_nextvictim++] = victim;
576 uc->uc_nextvictim %= uc->uc_size;
577 mutex_unlock(&dupreq_lock);
581 * Try to get an entry from the cache
582 * return 1 if found, 0 if not found and set the stage for cache_set()
/*
 * cache_get - look up a request in the duplicate-request cache.
 * Per the comment above: returns 1 on a hit (with *replyp/*replylenp
 * pointing at the cached reply), 0 on a miss; on a miss it saves the
 * prog/vers/proc triple in the cl_cache so cache_set can key the reply.
 * A hit requires xid, proc, vers, prog AND the full peer address to
 * match, so retransmissions from other clients are not confused.
 * NOTE(review): local declarations (loc, ent, uaddr) and the return
 * statements are missing from this listing.
 */
585 cache_get(xprt, msg, replyp, replylenp)
593 struct svc_dg_data *su = su_data(xprt);
594 struct cl_cache *uc = (struct cl_cache *) su->su_cache;
595 struct netconfig *nconf;
598 mutex_lock(&dupreq_lock);
/* Walk the hash chain for this xid's bucket. */
599 loc = CACHE_LOC(xprt, su->su_xid);
600 for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
601 if (ent->cache_xid == su->su_xid &&
602 ent->cache_proc == msg->rm_call.cb_proc &&
603 ent->cache_vers == msg->rm_call.cb_vers &&
604 ent->cache_prog == msg->rm_call.cb_prog &&
605 ent->cache_addr.len == xprt->xp_rtaddr.len &&
606 (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
607 xprt->xp_rtaddr.len) == 0)) {
/* Debug trace of the cache hit. */
608 if (libtirpc_debug_level > 3) {
609 if ((nconf = getnetconfigent(xprt->xp_netid))) {
610 uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
611 freenetconfigent(nconf);
613 ("cache entry found for xid=%x prog=%d"
614 "vers=%d proc=%d for rmtaddr=%s\n",
615 su->su_xid, msg->rm_call.cb_prog,
616 msg->rm_call.cb_vers,
617 msg->rm_call.cb_proc, uaddr));
/* Hand the cached reply back to svc_dg_recv for retransmission. */
621 *replyp = ent->cache_reply;
622 *replylenp = ent->cache_replylen;
623 mutex_unlock(&dupreq_lock);
628 * Failed to find entry
629 * Remember a few things so we can do a set later
631 uc->uc_proc = msg->rm_call.cb_proc;
632 uc->uc_vers = msg->rm_call.cb_vers;
633 uc->uc_prog = msg->rm_call.cb_prog;
634 mutex_unlock(&dupreq_lock);
639 * Enable reception of PKTINFO control messages
/*
 * svc_dg_enable_pktinfo - best-effort setsockopt so the kernel attaches
 * IP_PKTINFO / IPV6_RECVPKTINFO ancillary data to received datagrams
 * (letting replies go out from the address the request arrived on).
 * NOTE(review): the `val` declaration and the address-family switch/if
 * around the two setsockopt calls are missing from this listing;
 * return values are intentionally ignored (best effort).
 */
642 svc_dg_enable_pktinfo(int fd, const struct __rpc_sockinfo *si)
648 (void) setsockopt(fd, SOL_IP, IP_PKTINFO, &val, sizeof(val));
652 (void) setsockopt(fd, SOL_IPV6, IPV6_RECVPKTINFO, &val, sizeof(val));
659 * When given a control message received from the socket
660 * layer, check whether it contains valid PKTINFO data matching
661 * the address family of the peer address.
/*
 * svc_dg_valid_pktinfo - validate that msg carries exactly one
 * non-truncated cmsg of the right level/type/length for the peer's
 * address family, and zero its ifindex so the cached cmsg can be
 * reused for the reply without pinning an interface.
 * NOTE(review): this function runs past the end of the visible listing
 * (its return statements and closing braces, orig. >702, are not
 * shown), and interior lines are missing as in the rest of the file.
 */
664 svc_dg_valid_pktinfo(struct msghdr *msg)
666 struct cmsghdr *cmsg;
/* Truncated control data cannot be trusted. */
671 if (msg->msg_flags & MSG_CTRUNC)
/* Require exactly one control message. */
674 cmsg = CMSG_FIRSTHDR(msg);
675 if (cmsg == NULL || CMSG_NXTHDR(msg, cmsg) != NULL)
/* Dispatch on the family of the peer address captured by recvmsg. */
678 switch (((struct sockaddr *) msg->msg_name)->sa_family) {
680 if (cmsg->cmsg_level != SOL_IP
681 || cmsg->cmsg_type != IP_PKTINFO
682 || cmsg->cmsg_len < CMSG_LEN(sizeof (struct in_pktinfo))) {
685 struct in_pktinfo *pkti;
687 pkti = (struct in_pktinfo *) CMSG_DATA (cmsg);
/* Clear the interface index so the reply is routed normally. */
688 pkti->ipi_ifindex = 0;
694 if (cmsg->cmsg_level != SOL_IPV6
695 || cmsg->cmsg_type != IPV6_PKTINFO
696 || cmsg->cmsg_len < CMSG_LEN(sizeof (struct in6_pktinfo))) {
699 struct in6_pktinfo *pkti;
701 pkti = (struct in6_pktinfo *) CMSG_DATA (cmsg);
702 pkti->ipi6_ifindex = 0;