1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/net/sunrpc/svcsock.c
5 * These are the RPC server socket internals.
7 * The server scheduling algorithm does not always distribute the load
8 * evenly when servicing a single client. May need to modify the
9 * svc_xprt_enqueue procedure...
11 * TCP support is largely untested and may be a little slow. The problem
12 * is that we currently do two separate recvfrom's, one for the 4-byte
13 * record length, and the second for the actual record. This could possibly
14 * be improved by always reading a minimum size of around 100 bytes and
15 * tucking any superfluous bytes away in a temporary store. Still, that
16 * leaves write requests out in the rain. An alternative may be to peek at
17 * the first skb in the queue, and if it matches the next TCP sequence
18 * number, to extract the record marker. Yuck.
20 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/module.h>
26 #include <linux/errno.h>
27 #include <linux/fcntl.h>
28 #include <linux/net.h>
30 #include <linux/inet.h>
31 #include <linux/udp.h>
32 #include <linux/tcp.h>
33 #include <linux/unistd.h>
34 #include <linux/slab.h>
35 #include <linux/netdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/file.h>
38 #include <linux/freezer.h>
40 #include <net/checksum.h>
45 #include <net/tcp_states.h>
46 #include <net/tls_prot.h>
47 #include <net/handshake.h>
48 #include <linux/uaccess.h>
49 #include <linux/highmem.h>
50 #include <asm/ioctls.h>
51 #include <linux/key.h>
53 #include <linux/sunrpc/types.h>
54 #include <linux/sunrpc/clnt.h>
55 #include <linux/sunrpc/xdr.h>
56 #include <linux/sunrpc/msg_prot.h>
57 #include <linux/sunrpc/svcsock.h>
58 #include <linux/sunrpc/stats.h>
59 #include <linux/sunrpc/xprt.h>
61 #include <trace/events/sock.h>
62 #include <trace/events/sunrpc.h>
67 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
69 /* To-do: to avoid tying up an nfsd thread while waiting for a
70 * handshake request, the request could instead be deferred.
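 *
 * SVC_HANDSHAKE_TO bounds how long an nfsd thread waits, in jiffies,
 * for a TLS handshake to complete (see svc_tcp_handshake() below).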
73 SVC_HANDSHAKE_TO = 5U * HZ
76 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
78 static int svc_udp_recvfrom(struct svc_rqst *);
79 static int svc_udp_sendto(struct svc_rqst *);
80 static void svc_sock_detach(struct svc_xprt *);
81 static void svc_tcp_sock_detach(struct svc_xprt *);
82 static void svc_sock_free(struct svc_xprt *);
84 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
85 struct net *, struct sockaddr *,
87 #ifdef CONFIG_DEBUG_LOCK_ALLOC
88 static struct lock_class_key svc_key[2];
89 static struct lock_class_key svc_slock_key[2];
91 static void svc_reclassify_socket(struct socket *sock)
93 struct sock *sk = sock->sk;
95 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
98 switch (sk->sk_family) {
100 sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
102 "sk_xprt.xpt_lock-AF_INET-NFSD",
107 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
109 "sk_xprt.xpt_lock-AF_INET6-NFSD",
118 static void svc_reclassify_socket(struct socket *sock)
124 * svc_tcp_release_ctxt - Release transport-related resources
125 * @xprt: the transport which owned the context
126 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
129 static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
134 * svc_udp_release_ctxt - Release transport-related resources
135 * @xprt: the transport which owned the context
136 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
139 static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
141 struct sk_buff *skb = ctxt;
147 union svc_pktinfo_u {
148 struct in_pktinfo pkti;
149 struct in6_pktinfo pkti6;
151 #define SVC_PKTINFO_SPACE \
152 CMSG_SPACE(sizeof(union svc_pktinfo_u))
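/*
 * svc_set_cmsg_data - fill in a control message carrying the
 * datagram's original destination address, so the UDP reply is sent
 * from the same local address the request arrived on (this matters
 * on multihomed hosts).
 */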
154 static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
156 struct svc_sock *svsk =
157 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
158 switch (svsk->sk_sk->sk_family) {
160 struct in_pktinfo *pki = CMSG_DATA(cmh);
162 cmh->cmsg_level = SOL_IP;
163 cmh->cmsg_type = IP_PKTINFO;
164 pki->ipi_ifindex = 0;
165 pki->ipi_spec_dst.s_addr =
166 svc_daddr_in(rqstp)->sin_addr.s_addr;
167 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
172 struct in6_pktinfo *pki = CMSG_DATA(cmh);
173 struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
175 cmh->cmsg_level = SOL_IPV6;
176 cmh->cmsg_type = IPV6_PKTINFO;
177 pki->ipi6_ifindex = daddr->sin6_scope_id;
178 pki->ipi6_addr = daddr->sin6_addr;
179 cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
185 static int svc_sock_result_payload(struct svc_rqst *rqstp, unsigned int offset,
192 * Report socket names for nfsdfs
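 * Each line is of the form "<family> <proto> <address> <port>\n",
 * e.g. "ipv4 udp 192.0.2.1 2049".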
194 static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
196 const struct sock *sk = svsk->sk_sk;
197 const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
201 switch (sk->sk_family) {
203 len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
205 &inet_sk(sk)->inet_rcv_saddr,
206 inet_sk(sk)->inet_num);
208 #if IS_ENABLED(CONFIG_IPV6)
210 len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
212 &sk->sk_v6_rcv_saddr,
213 inet_sk(sk)->inet_num);
217 len = snprintf(buf, remaining, "*unknown-%d*\n",
221 if (len >= remaining) {
223 return -ENAMETOOLONG;
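/*
 * Handle a control message received on a TLS-enabled stream socket.
 * Ordinary application data passes through (minus the MSG_EOR that
 * the TLS layer sets on every record); alerts and any other record
 * types are consumed here, with a fatal alert reported to the caller
 * as an error.
 */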
229 svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
230 struct cmsghdr *cmsg, int ret)
232 u8 content_type = tls_get_record_type(sock->sk, cmsg);
233 u8 level, description;
235 switch (content_type) {
238 case TLS_RECORD_TYPE_DATA:
239 /* TLS sets EOR at the end of each application data
240 * record, even though there might be more frames
241 * waiting to be decrypted.
243 msg->msg_flags &= ~MSG_EOR;
245 case TLS_RECORD_TYPE_ALERT:
246 tls_alert_recv(sock->sk, msg, &level, &description);
247 ret = (level == TLS_ALERT_LEVEL_FATAL) ?
251 /* discard this record type */
258 svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
262 u8 buf[CMSG_SPACE(sizeof(u8))];
264 struct socket *sock = svsk->sk_sock;
267 msg->msg_control = &u;
268 msg->msg_controllen = sizeof(u);
269 ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
270 if (unlikely(msg->msg_controllen != sizeof(u)))
271 ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
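/*
 * On architectures with aliasing data caches, pages written via the
 * kernel mapping must be flushed before being read through another
 * mapping. svc_flush_bvec() below does that for the pages just filled
 * by a receive; elsewhere it is a no-op.
 */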
275 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
276 static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
278 struct bvec_iter bi = {
279 .bi_size = size + seek,
283 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
284 for_each_bvec(bv, bvec, bi, bi)
285 flush_dcache_page(bv.bv_page);
288 static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size,
295 * Read from @rqstp's transport socket. The incoming message fills whole
296 * pages in @rqstp's rq_pages array until the last page of the message
297 * has been received into a partial page.
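 * @buflen counts the bytes wanted for this record so far; @seek is
 * how many of those bytes a previous call already received. The
 * result of the underlying receive is returned: the number of bytes
 * newly read, or a negative errno.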
299 static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
302 struct svc_sock *svsk =
303 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
304 struct bio_vec *bvec = rqstp->rq_bvec;
305 struct msghdr msg = { NULL };
310 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
312 for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
313 bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
314 rqstp->rq_respages = &rqstp->rq_pages[i];
315 rqstp->rq_next_page = rqstp->rq_respages + 1;
317 iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
319 iov_iter_advance(&msg.msg_iter, seek);
322 len = svc_tcp_sock_recv_cmsg(svsk, &msg);
324 svc_flush_bvec(bvec, len, seek);
326 /* If we read a full record, then assume there may be more
327 * data to read (stream based sockets only!)
330 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
336 * Set socket snd and rcv buffer lengths
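 * Each buffer gets room for @nreqs maximum-sized messages, doubled
 * for slack; @nreqs is clamped first so the products below cannot
 * overflow an int.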
338 static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
340 unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
341 struct socket *sock = svsk->sk_sock;
343 nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
346 sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
347 sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
348 sock->sk->sk_write_space(sock->sk);
349 release_sock(sock->sk);
352 static void svc_sock_secure_port(struct svc_rqst *rqstp)
354 if (svc_port_is_privileged(svc_addr(rqstp)))
355 set_bit(RQ_SECURE, &rqstp->rq_flags);
357 clear_bit(RQ_SECURE, &rqstp->rq_flags);
361 * INET callback when data has been received on the socket.
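 * Only the clear-to-set transition of XPT_DATA enqueues the
 * transport; while the flag is already set, further wakeups are
 * absorbed without queueing the transport again.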
363 static void svc_data_ready(struct sock *sk)
365 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
367 trace_sk_data_ready(sk);
370 /* Refer to svc_setup_socket() for details. */
373 trace_svcsock_data_ready(&svsk->sk_xprt, 0);
374 if (test_bit(XPT_HANDSHAKE, &svsk->sk_xprt.xpt_flags))
376 if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
377 svc_xprt_enqueue(&svsk->sk_xprt);
382 * INET callback when space is newly available on the socket.
384 static void svc_write_space(struct sock *sk)
386 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
389 /* Refer to svc_setup_socket() for details. */
391 trace_svcsock_write_space(&svsk->sk_xprt, 0);
392 svsk->sk_owspace(sk);
393 svc_xprt_enqueue(&svsk->sk_xprt);
397 static int svc_tcp_has_wspace(struct svc_xprt *xprt)
399 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
401 if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
403 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
406 static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
408 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
410 sock_no_linger(svsk->sk_sock->sk);
414 * svc_tcp_handshake_done - Handshake completion handler
415 * @data: address of xprt to wake
416 * @status: status of handshake
417 * @peerid: serial number of key containing the remote peer's identity
419 * If a security policy is specified as an export option, we don't
420 * have a specific export here to check. So we set a "TLS session
421 * is present" flag on the xprt and let an upper layer enforce local
424 static void svc_tcp_handshake_done(void *data, int status, key_serial_t peerid)
426 struct svc_xprt *xprt = data;
427 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
430 if (peerid != TLS_NO_PEERID)
431 set_bit(XPT_PEER_AUTH, &xprt->xpt_flags);
432 set_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
434 clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
435 complete_all(&svsk->sk_handshake_done);
439 * svc_tcp_handshake - Perform a transport-layer security handshake
440 * @xprt: connected transport endpoint
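 *
 * The handshake itself is performed by a user space agent; this
 * function only posts the request and then waits (bounded by
 * SVC_HANDSHAKE_TO) for the completion downcall. On failure or
 * timeout the transport is marked for close.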
443 static void svc_tcp_handshake(struct svc_xprt *xprt)
445 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
446 struct sock *sk = svsk->sk_sock->sk;
447 struct tls_handshake_args args = {
448 .ta_sock = svsk->sk_sock,
449 .ta_done = svc_tcp_handshake_done,
454 trace_svc_tls_upcall(xprt);
456 clear_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
457 init_completion(&svsk->sk_handshake_done);
459 ret = tls_server_hello_x509(&args, GFP_KERNEL);
461 trace_svc_tls_not_started(xprt);
465 ret = wait_for_completion_interruptible_timeout(&svsk->sk_handshake_done,
468 if (tls_handshake_cancel(sk)) {
469 trace_svc_tls_timed_out(xprt);
474 if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) {
475 trace_svc_tls_unavailable(xprt);
479 /* Mark the transport ready in case the remote sent RPC
480 * traffic before the kernel received the handshake
481 * completion downcall.
483 set_bit(XPT_DATA, &xprt->xpt_flags);
484 svc_xprt_enqueue(xprt);
488 set_bit(XPT_CLOSE, &xprt->xpt_flags);
490 clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
491 set_bit(XPT_DATA, &xprt->xpt_flags);
492 svc_xprt_enqueue(xprt);
496 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
498 static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
501 struct in_pktinfo *pki = CMSG_DATA(cmh);
502 struct sockaddr_in *daddr = svc_daddr_in(rqstp);
504 if (cmh->cmsg_type != IP_PKTINFO)
507 daddr->sin_family = AF_INET;
508 daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
513 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
515 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
518 struct in6_pktinfo *pki = CMSG_DATA(cmh);
519 struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
521 if (cmh->cmsg_type != IPV6_PKTINFO)
524 daddr->sin6_family = AF_INET6;
525 daddr->sin6_addr = pki->ipi6_addr;
526 daddr->sin6_scope_id = pki->ipi6_ifindex;
531 * Copy the UDP datagram's destination address to the rqstp structure.
532 * The 'destination' address in this case is the address to which the
533 * peer sent the datagram, i.e. our local address. For multihomed
534 * hosts, this can change from msg to msg. Note that only the IP
535 * address changes, the port number should remain the same.
537 static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
540 switch (cmh->cmsg_level) {
542 return svc_udp_get_dest_address4(rqstp, cmh);
544 return svc_udp_get_dest_address6(rqstp, cmh);
551 * svc_udp_recvfrom - Receive a datagram from a UDP socket.
552 * @rqstp: request structure into which to receive an RPC Call
554 * Called in a loop when XPT_DATA has been set.
557 * On success, the number of bytes in a received RPC Call, or
558 * %0 if a complete RPC Call message was not ready to return
560 static int svc_udp_recvfrom(struct svc_rqst *rqstp)
562 struct svc_sock *svsk =
563 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
564 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
568 long all[SVC_PKTINFO_SPACE / sizeof(long)];
570 struct cmsghdr *cmh = &buffer.hdr;
571 struct msghdr msg = {
572 .msg_name = svc_addr(rqstp),
574 .msg_controllen = sizeof(buffer),
575 .msg_flags = MSG_DONTWAIT,
580 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
581 /* udp sockets need large rcvbuf as all pending
582 * requests are still in that buffer. sndbuf must
583 * also be large enough that there is enough space
584 * for one reply per thread. We count all threads
585 * rather than threads in a particular pool, which
586 * provides an upper bound on the number of threads
587 * which will access the socket.
589 svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
591 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
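/* Peek at the queued datagram only to pick up its control message
 * (the pktinfo that carries our local address); the payload itself
 * is dequeued below with skb_recv_udp().
 */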
592 err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
593 0, 0, MSG_PEEK | MSG_DONTWAIT);
596 skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err);
600 len = svc_addr_len(svc_addr(rqstp));
601 rqstp->rq_addrlen = len;
602 if (skb->tstamp == 0) {
603 skb->tstamp = ktime_get_real();
604 /* Don't enable netstamp, sunrpc doesn't
605 need that much accuracy */
607 sock_write_timestamp(svsk->sk_sk, skb->tstamp);
608 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
611 rqstp->rq_arg.len = len;
612 trace_svcsock_udp_recv(&svsk->sk_xprt, len);
614 rqstp->rq_prot = IPPROTO_UDP;
616 if (!svc_udp_get_dest_address(rqstp, cmh))
618 rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
620 if (skb_is_nonlinear(skb)) {
621 /* we have to copy */
623 if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
628 /* we can use it in-place */
629 rqstp->rq_arg.head[0].iov_base = skb->data;
630 rqstp->rq_arg.head[0].iov_len = len;
631 if (skb_checksum_complete(skb))
633 rqstp->rq_xprt_ctxt = skb;
636 rqstp->rq_arg.page_base = 0;
637 if (len <= rqstp->rq_arg.head[0].iov_len) {
638 rqstp->rq_arg.head[0].iov_len = len;
639 rqstp->rq_arg.page_len = 0;
640 rqstp->rq_respages = rqstp->rq_pages+1;
642 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
643 rqstp->rq_respages = rqstp->rq_pages + 1 +
644 DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
646 rqstp->rq_next_page = rqstp->rq_respages+1;
649 serv->sv_stats->netudpcnt++;
651 svc_sock_secure_port(rqstp);
652 svc_xprt_received(rqstp->rq_xprt);
656 if (err != -EAGAIN) {
657 /* possibly an icmp error */
658 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
660 trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
663 net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
664 cmh->cmsg_level, cmh->cmsg_type);
671 svc_xprt_received(rqstp->rq_xprt);
676 * svc_udp_sendto - Send out a reply on a UDP socket
677 * @rqstp: completed svc_rqst
679 * xpt_mutex ensures @rqstp's whole message is written to the socket
680 * without interruption.
682 * Returns the number of bytes sent, or a negative errno.
684 static int svc_udp_sendto(struct svc_rqst *rqstp)
686 struct svc_xprt *xprt = rqstp->rq_xprt;
687 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
688 struct xdr_buf *xdr = &rqstp->rq_res;
691 long all[SVC_PKTINFO_SPACE / sizeof(long)];
693 struct cmsghdr *cmh = &buffer.hdr;
694 struct msghdr msg = {
695 .msg_name = &rqstp->rq_addr,
696 .msg_namelen = rqstp->rq_addrlen,
698 .msg_controllen = sizeof(buffer),
703 svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
704 rqstp->rq_xprt_ctxt = NULL;
706 svc_set_cmsg_data(rqstp, cmh);
708 mutex_lock(&xprt->xpt_mutex);
710 if (svc_xprt_is_dead(xprt))
713 err = xdr_alloc_bvec(xdr, GFP_KERNEL);
717 err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
718 if (err == -ECONNREFUSED) {
719 /* ICMP error on earlier request. */
720 err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
723 trace_svcsock_udp_send(xprt, err);
725 mutex_unlock(&xprt->xpt_mutex);
731 mutex_unlock(&xprt->xpt_mutex);
735 static int svc_udp_has_wspace(struct svc_xprt *xprt)
737 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
738 struct svc_serv *serv = xprt->xpt_server;
739 unsigned long required;
742 * Set the SOCK_NOSPACE flag before checking the available socket space.
745 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
746 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
747 if (required*2 > sock_wspace(svsk->sk_sk))
749 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
753 static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
759 static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
763 static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
765 struct sockaddr *sa, int salen,
768 return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
771 static const struct svc_xprt_ops svc_udp_ops = {
772 .xpo_create = svc_udp_create,
773 .xpo_recvfrom = svc_udp_recvfrom,
774 .xpo_sendto = svc_udp_sendto,
775 .xpo_result_payload = svc_sock_result_payload,
776 .xpo_release_ctxt = svc_udp_release_ctxt,
777 .xpo_detach = svc_sock_detach,
778 .xpo_free = svc_sock_free,
779 .xpo_has_wspace = svc_udp_has_wspace,
780 .xpo_accept = svc_udp_accept,
781 .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
784 static struct svc_xprt_class svc_udp_class = {
786 .xcl_owner = THIS_MODULE,
787 .xcl_ops = &svc_udp_ops,
788 .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
789 .xcl_ident = XPRT_TRANSPORT_UDP,
792 static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
794 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
795 &svsk->sk_xprt, serv);
796 clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
797 svsk->sk_sk->sk_data_ready = svc_data_ready;
798 svsk->sk_sk->sk_write_space = svc_write_space;
800 /* initial setting must have enough space to
801 * receive and respond to one request.
802 * svc_udp_recvfrom will re-adjust if necessary
804 svc_sock_setbufsize(svsk, 3);
806 /* data might have come in before data_ready set up */
807 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
808 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
810 /* make sure we get destination address info */
811 switch (svsk->sk_sk->sk_family) {
813 ip_sock_set_pktinfo(svsk->sk_sock->sk);
816 ip6_sock_set_recvpktinfo(svsk->sk_sock->sk);
824 * A data_ready event on a listening socket means there's a connection
825 * pending. Do not use state_change as a substitute for it.
827 static void svc_tcp_listen_data_ready(struct sock *sk)
829 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
831 trace_sk_data_ready(sk);
834 * This callback may be called twice when a new connection
835 * is established, as a child socket inherits everything
836 * from a parent LISTEN socket.
837 * 1) The data_ready method of the parent socket will be called
838 * when one of the child sockets becomes ESTABLISHED.
839 * 2) The data_ready method of the child socket may be called
840 * when it receives data before the socket is accepted.
841 * In case of 2, we should ignore it silently and DO NOT refer to the svsk.
844 if (sk->sk_state != TCP_LISTEN)
848 /* Refer to svc_setup_socket() for details. */
851 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
852 svc_xprt_enqueue(&svsk->sk_xprt);
857 * A state change on a connected socket means it's dying or dead.
859 static void svc_tcp_state_change(struct sock *sk)
861 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
864 /* Refer to svc_setup_socket() for details. */
867 trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
868 if (sk->sk_state != TCP_ESTABLISHED)
869 svc_xprt_deferred_close(&svsk->sk_xprt);
874 * Accept a TCP connection
876 static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
878 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
879 struct sockaddr_storage addr;
880 struct sockaddr *sin = (struct sockaddr *) &addr;
881 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
882 struct socket *sock = svsk->sk_sock;
883 struct socket *newsock;
884 struct svc_sock *newsvsk;
890 clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
891 err = kernel_accept(sock, &newsock, O_NONBLOCK);
894 trace_svcsock_accept_err(xprt, serv->sv_name, err);
897 if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
900 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
902 err = kernel_getpeername(newsock, sin);
904 trace_svcsock_getpeername_err(xprt, serv->sv_name, err);
905 goto failed; /* aborted connection or whatever */
909 /* Reset the inherited callbacks before calling svc_setup_socket */
910 newsock->sk->sk_state_change = svsk->sk_ostate;
911 newsock->sk->sk_data_ready = svsk->sk_odata;
912 newsock->sk->sk_write_space = svsk->sk_owspace;
914 /* make sure that a write doesn't block forever when low on memory */
917 newsock->sk->sk_sndtimeo = HZ*30;
919 newsvsk = svc_setup_socket(serv, newsock,
920 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY));
923 svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
924 err = kernel_getsockname(newsock, sin);
926 if (unlikely(err < 0))
927 slen = offsetof(struct sockaddr, sa_data);
928 svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);
930 if (sock_is_loopback(newsock->sk))
931 set_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
933 clear_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
935 serv->sv_stats->nettcpconn++;
937 return &newsvsk->sk_xprt;
944 static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
945 struct svc_rqst *rqstp)
947 size_t len = svsk->sk_datalen;
948 unsigned int i, npages;
952 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
953 for (i = 0; i < npages; i++) {
954 if (rqstp->rq_pages[i] != NULL)
955 put_page(rqstp->rq_pages[i]);
956 BUG_ON(svsk->sk_pages[i] == NULL);
957 rqstp->rq_pages[i] = svsk->sk_pages[i];
958 svsk->sk_pages[i] = NULL;
960 rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
964 static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
966 unsigned int i, len, npages;
968 if (svsk->sk_datalen == 0)
970 len = svsk->sk_datalen;
971 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
972 for (i = 0; i < npages; i++) {
973 svsk->sk_pages[i] = rqstp->rq_pages[i];
974 rqstp->rq_pages[i] = NULL;
978 static void svc_tcp_clear_pages(struct svc_sock *svsk)
980 unsigned int i, len, npages;
982 if (svsk->sk_datalen == 0)
984 len = svsk->sk_datalen;
985 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
986 for (i = 0; i < npages; i++) {
987 if (svsk->sk_pages[i] == NULL) {
991 put_page(svsk->sk_pages[i]);
992 svsk->sk_pages[i] = NULL;
996 svsk->sk_datalen = 0;
1000 * Receive fragment record header into sk_marker.
1002 static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
1003 struct svc_rqst *rqstp)
1007 /* If we haven't gotten the record length yet,
1008 * get the next four bytes.
1010 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
1011 struct msghdr msg = { NULL };
1014 want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
1015 iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
1017 iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
1018 len = svc_tcp_sock_recv_cmsg(svsk, &msg);
1021 svsk->sk_tcplen += len;
1023 /* call again to read the remaining bytes */
1026 trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
1027 if (svc_sock_reclen(svsk) + svsk->sk_datalen >
1028 svsk->sk_xprt.xpt_server->sv_max_mesg)
1031 return svc_sock_reclen(svsk);
1034 net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
1035 __func__, svsk->sk_xprt.xpt_server->sv_name,
1036 svc_sock_reclen(svsk));
1037 svc_xprt_deferred_close(&svsk->sk_xprt);
1042 static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1044 struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
1045 struct rpc_rqst *req = NULL;
1046 struct kvec *src, *dst;
1047 __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
1056 spin_lock(&bc_xprt->queue_lock);
1057 req = xprt_lookup_rqst(bc_xprt, xid);
1059 goto unlock_notfound;
1061 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
1063 * XXX!: cheating for now! Only copying HEAD.
1064 * But we know this is good enough for now (in fact, for any
1065 * callback reply in the foreseeable future).
1067 dst = &req->rq_private_buf.head[0];
1068 src = &rqstp->rq_arg.head[0];
1069 if (dst->iov_len < src->iov_len)
1070 goto unlock_eagain; /* whatever; just giving up. */
1071 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1072 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
1073 rqstp->rq_arg.len = 0;
1074 spin_unlock(&bc_xprt->queue_lock);
1078 "%s: Got unrecognized reply: "
1079 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1080 __func__, ntohl(calldir),
1081 bc_xprt, ntohl(xid));
1083 spin_unlock(&bc_xprt->queue_lock);
1087 static void svc_tcp_fragment_received(struct svc_sock *svsk)
1089 /* If we have more data, signal svc_xprt_enqueue() to try again */
1090 svsk->sk_tcplen = 0;
1091 svsk->sk_marker = xdr_zero;
1095 * svc_tcp_recvfrom - Receive data from a TCP socket
1096 * @rqstp: request structure into which to receive an RPC Call
1098 * Called in a loop when XPT_DATA has been set.
1100 * Read the 4-byte stream record marker, then use the record length
1101 * in that marker to set up exactly the resources needed to receive
1102 * the next RPC message into @rqstp.
1105 * On success, the number of bytes in a received RPC Call, or
1106 * %0 if a complete RPC Call message was not ready to return
1108 * The zero return case handles partial receives and callback Replies.
1109 * The state of a partial receive is preserved in the svc_sock for
1110 * the next call to svc_tcp_recvfrom.
1112 static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1114 struct svc_sock *svsk =
1115 container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
1116 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
1122 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1123 len = svc_tcp_read_marker(svsk, rqstp);
1127 base = svc_tcp_restore_pages(svsk, rqstp);
1128 want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
1129 len = svc_tcp_read_msg(rqstp, base + want, base);
1131 trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
1132 svsk->sk_tcplen += len;
1133 svsk->sk_datalen += len;
1135 if (len != want || !svc_sock_final_rec(svsk))
1136 goto err_incomplete;
1137 if (svsk->sk_datalen < 8)
1140 rqstp->rq_arg.len = svsk->sk_datalen;
1141 rqstp->rq_arg.page_base = 0;
1142 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1143 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1144 rqstp->rq_arg.page_len = 0;
1146 rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1148 rqstp->rq_xprt_ctxt = NULL;
1149 rqstp->rq_prot = IPPROTO_TCP;
1150 if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
1151 set_bit(RQ_LOCAL, &rqstp->rq_flags);
1153 clear_bit(RQ_LOCAL, &rqstp->rq_flags);
1155 p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
1158 len = receive_cb_reply(svsk, rqstp);
1160 /* Reset TCP read info */
1161 svsk->sk_datalen = 0;
1162 svc_tcp_fragment_received(svsk);
1167 svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
1169 serv->sv_stats->nettcpcnt++;
1171 svc_sock_secure_port(rqstp);
1172 svc_xprt_received(rqstp->rq_xprt);
1173 return rqstp->rq_arg.len;
1176 svc_tcp_save_pages(svsk, rqstp);
1177 if (len < 0 && len != -EAGAIN)
1180 svc_tcp_fragment_received(svsk);
1182 trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
1183 svc_sock_reclen(svsk),
1184 svsk->sk_tcplen - sizeof(rpc_fraghdr));
1189 trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
1192 svsk->sk_datalen = 0;
1194 trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
1195 svc_xprt_deferred_close(&svsk->sk_xprt);
1197 svc_xprt_received(rqstp->rq_xprt);
1198 return 0; /* record not complete */
1201 static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
1204 struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };
1206 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
1207 return sock_sendmsg(sock, &msg);
1211 * MSG_SPLICE_PAGES is used exclusively to reduce the number of
1212 * copy operations in this path. Therefore the caller must ensure
1213 * that the pages backing @xdr are unchanging.
1215 * In addition, the logic assumes that each .bv_len is never larger than PAGE_SIZE.
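 *
 * The reply goes out in up to four pieces (record marker, head,
 * page payload, tail); each send is checked for a short write before
 * the next piece is attempted.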
1218 static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
1219 rpc_fraghdr marker, unsigned int *sentp)
1221 const struct kvec *head = xdr->head;
1222 const struct kvec *tail = xdr->tail;
1224 .iov_base = &marker,
1225 .iov_len = sizeof(marker),
1227 struct msghdr msg = {
1233 ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
1237 ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
1241 if (ret != rm.iov_len)
1244 ret = svc_tcp_send_kvec(sock, head, 0);
1248 if (ret != head->iov_len)
1251 if (xdr_buf_pagecount(xdr))
1252 xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base);
1254 msg.msg_flags = MSG_SPLICE_PAGES;
1255 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
1256 xdr_buf_pagecount(xdr), xdr->page_len);
1257 ret = sock_sendmsg(sock, &msg);
1262 if (tail->iov_len) {
1263 ret = svc_tcp_send_kvec(sock, tail, 0);
1274 * svc_tcp_sendto - Send out a reply on a TCP socket
1275 * @rqstp: completed svc_rqst
1277 * xpt_mutex ensures @rqstp's whole message is written to the socket
1278 * without interruption.
1280 * Returns the number of bytes sent, or a negative errno.
1282 static int svc_tcp_sendto(struct svc_rqst *rqstp)
1284 struct svc_xprt *xprt = rqstp->rq_xprt;
1285 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1286 struct xdr_buf *xdr = &rqstp->rq_res;
1287 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
1292 svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
1293 rqstp->rq_xprt_ctxt = NULL;
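/* The socket stays corked while other replies are queued behind this
 * one (sk_sendqlen); the cork is released only when the queue drains,
 * so consecutive small sends can be coalesced.
 */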
1295 atomic_inc(&svsk->sk_sendqlen);
1296 mutex_lock(&xprt->xpt_mutex);
1297 if (svc_xprt_is_dead(xprt))
1299 tcp_sock_set_cork(svsk->sk_sk, true);
1300 err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
1302 trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
1303 if (err < 0 || sent != (xdr->len + sizeof(marker)))
1305 if (atomic_dec_and_test(&svsk->sk_sendqlen))
1306 tcp_sock_set_cork(svsk->sk_sk, false);
1307 mutex_unlock(&xprt->xpt_mutex);
1311 atomic_dec(&svsk->sk_sendqlen);
1312 mutex_unlock(&xprt->xpt_mutex);
1315 pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
1316 xprt->xpt_server->sv_name,
1317 (err < 0) ? "got error" : "sent",
1318 (err < 0) ? err : sent, xdr->len);
1319 svc_xprt_deferred_close(xprt);
1320 atomic_dec(&svsk->sk_sendqlen);
1321 mutex_unlock(&xprt->xpt_mutex);
1325 static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
1327 struct sockaddr *sa, int salen,
1330 return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
1333 static const struct svc_xprt_ops svc_tcp_ops = {
1334 .xpo_create = svc_tcp_create,
1335 .xpo_recvfrom = svc_tcp_recvfrom,
1336 .xpo_sendto = svc_tcp_sendto,
1337 .xpo_result_payload = svc_sock_result_payload,
1338 .xpo_release_ctxt = svc_tcp_release_ctxt,
1339 .xpo_detach = svc_tcp_sock_detach,
1340 .xpo_free = svc_sock_free,
1341 .xpo_has_wspace = svc_tcp_has_wspace,
1342 .xpo_accept = svc_tcp_accept,
1343 .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
1344 .xpo_handshake = svc_tcp_handshake,
1347 static struct svc_xprt_class svc_tcp_class = {
1349 .xcl_owner = THIS_MODULE,
1350 .xcl_ops = &svc_tcp_ops,
1351 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
1352 .xcl_ident = XPRT_TRANSPORT_TCP,
1355 void svc_init_xprt_sock(void)
1357 svc_reg_xprt_class(&svc_tcp_class);
1358 svc_reg_xprt_class(&svc_udp_class);
1361 void svc_cleanup_xprt_sock(void)
1363 svc_unreg_xprt_class(&svc_tcp_class);
1364 svc_unreg_xprt_class(&svc_udp_class);
1367 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1369 struct sock *sk = svsk->sk_sk;
1371 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
1372 &svsk->sk_xprt, serv);
1373 set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
1374 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
1375 if (sk->sk_state == TCP_LISTEN) {
1376 strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
1377 set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
1378 sk->sk_data_ready = svc_tcp_listen_data_ready;
1379 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
1381 sk->sk_state_change = svc_tcp_state_change;
1382 sk->sk_data_ready = svc_data_ready;
1383 sk->sk_write_space = svc_write_space;
1385 svsk->sk_marker = xdr_zero;
1386 svsk->sk_tcplen = 0;
1387 svsk->sk_datalen = 0;
1388 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
1390 tcp_sock_set_nodelay(sk);
1392 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1393 switch (sk->sk_state) {
1395 case TCP_ESTABLISHED:
1398 svc_xprt_deferred_close(&svsk->sk_xprt);
1403 void svc_sock_update_bufs(struct svc_serv *serv)
1406 * The number of server threads has changed. Update
1407 * rcvbuf and sndbuf accordingly on all sockets
1409 struct svc_sock *svsk;
1411 spin_lock_bh(&serv->sv_lock);
1412 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
1413 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1414 spin_unlock_bh(&serv->sv_lock);
1416 EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
1419 * Initialize socket for RPC use and create svc_sock struct
1421 static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1422 struct socket *sock,
1425 struct svc_sock *svsk;
1427 int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
1429 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
1431 return ERR_PTR(-ENOMEM);
1435 if (pmap_register) {
1438 err = svc_register(serv, sock_net(sock->sk), inet->sk_family,
1440 ntohs(inet_sk(inet)->inet_sport));
1443 return ERR_PTR(err);
1447 svsk->sk_sock = sock;
1449 svsk->sk_ostate = inet->sk_state_change;
1450 svsk->sk_odata = inet->sk_data_ready;
1451 svsk->sk_owspace = inet->sk_write_space;
1453 * This barrier is necessary in order to prevent a race condition
1454 * with svc_data_ready(), svc_tcp_listen_data_ready(), and others
1455 * when calling callbacks above.
1458 inet->sk_user_data = svsk;
1460 /* Initialize the socket */
1461 if (sock->type == SOCK_DGRAM)
1462 svc_udp_init(svsk, serv);
1464 svc_tcp_init(svsk, serv);
1466 trace_svcsock_new(svsk, sock);
1471 * svc_addsock - add a listener socket to an RPC service
1472 * @serv: pointer to RPC service to which to add a new listener
1473 * @net: caller's network namespace
1474 * @fd: file descriptor of the new listener
1475 * @name_return: pointer to buffer to fill in with name of listener
1476 * @len: size of the buffer
1479 * Fills in socket name and returns positive length of name if successful.
1480 * Name is terminated with '\n'. On error, returns a negative errno
1483 int svc_addsock(struct svc_serv *serv, struct net *net, const int fd,
1484 char *name_return, const size_t len, const struct cred *cred)
1487 struct socket *so = sockfd_lookup(fd, &err);
1488 struct svc_sock *svsk = NULL;
1489 struct sockaddr_storage addr;
1490 struct sockaddr *sin = (struct sockaddr *)&addr;
1496 if (sock_net(so->sk) != net)
1498 err = -EAFNOSUPPORT;
1499 if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
1501 err = -EPROTONOSUPPORT;
1502 if (so->sk->sk_protocol != IPPROTO_TCP &&
1503 so->sk->sk_protocol != IPPROTO_UDP)
1506 if (so->state > SS_UNCONNECTED)
1509 if (!try_module_get(THIS_MODULE))
1511 svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS);
1513 module_put(THIS_MODULE);
1514 err = PTR_ERR(svsk);
1517 salen = kernel_getsockname(svsk->sk_sock, sin);
1519 svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
1520 svsk->sk_xprt.xpt_cred = get_cred(cred);
1521 svc_add_new_perm_xprt(serv, &svsk->sk_xprt);
1522 return svc_one_sock_name(svsk, name_return, len);
1527 EXPORT_SYMBOL_GPL(svc_addsock);
1530 * Create socket for RPC service.
1532 static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
1535 struct sockaddr *sin, int len,
1538 struct svc_sock *svsk;
1539 struct socket *sock;
1542 struct sockaddr_storage addr;
1543 struct sockaddr *newsin = (struct sockaddr *)&addr;
1547 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1548 printk(KERN_WARNING "svc: only UDP and TCP "
1549 "sockets supported\n");
1550 return ERR_PTR(-EINVAL);
1553 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
1554 switch (sin->sa_family) {
1562 return ERR_PTR(-EINVAL);
1565 error = __sock_create(net, family, type, protocol, &sock, 1);
1567 return ERR_PTR(error);
1569 svc_reclassify_socket(sock);
1572 * If this is a PF_INET6 listener, we want to avoid
1573 * getting requests from IPv4 remotes. Those should
1574 * be shunted to a PF_INET listener via rpcbind.
1576 if (family == PF_INET6)
1577 ip6_sock_set_v6only(sock->sk);
1578 if (type == SOCK_STREAM)
1579 sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
1580 error = kernel_bind(sock, sin, len);
1584 error = kernel_getsockname(sock, newsin);
1589 if (protocol == IPPROTO_TCP) {
1590 if ((error = kernel_listen(sock, 64)) < 0)
1594 svsk = svc_setup_socket(serv, sock, flags);
1596 error = PTR_ERR(svsk);
1599 svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
1600 return (struct svc_xprt *)svsk;
1603 return ERR_PTR(error);
1607 * Detach the svc_sock from the socket so that no
1608 * more callbacks occur.
1610 static void svc_sock_detach(struct svc_xprt *xprt)
1612 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1613 struct sock *sk = svsk->sk_sk;
1615 /* put back the old socket callbacks */
1617 sk->sk_state_change = svsk->sk_ostate;
1618 sk->sk_data_ready = svsk->sk_odata;
1619 sk->sk_write_space = svsk->sk_owspace;
1620 sk->sk_user_data = NULL;
1625 * Disconnect the socket, and reset the callbacks
1627 static void svc_tcp_sock_detach(struct svc_xprt *xprt)
1629 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1631 tls_handshake_close(svsk->sk_sock);
1633 svc_sock_detach(xprt);
1635 if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
1636 svc_tcp_clear_pages(svsk);
1637 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
1642 * Free the svc_sock's socket resources and the svc_sock itself.
1644 static void svc_sock_free(struct svc_xprt *xprt)
1646 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
1647 struct socket *sock = svsk->sk_sock;
1649 trace_svcsock_free(svsk, sock);
1651 tls_handshake_cancel(sock->sk);