/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock(&rxrpc_call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
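
/*
 * A minimal, self-contained userspace sketch (not part of this file) of the
 * backlog ring discipline used above: a power-of-two ring indexed by
 * counters that are masked with (size - 1) on store, so CIRC_CNT() reduces
 * to a subtract-and-mask.  RING_SIZE and the two macros are hypothetical
 * stand-ins for RXRPC_BACKLOG_MAX and the linux/circ_buf.h helpers.
 */
#if 0
#include <stdio.h>

#define RING_SIZE		32	/* must be a power of two */
#define RING_CNT(head, tail)	(((head) - (tail)) & (RING_SIZE - 1))
#define RING_SPACE(head, tail)	RING_CNT((tail), (head) + 1)

int main(void)
{
	const char *slots[RING_SIZE];
	unsigned int head = 0, tail = 0;

	/* Producer: charge one entry, then publish the new head (the
	 * kernel code publishes with smp_store_release()). */
	if (RING_SPACE(head, tail) > 0) {
		slots[head] = "preallocated call";
		head = (head + 1) & (RING_SIZE - 1);
	}

	/* Consumer: take a charged entry, then publish the new tail. */
	if (RING_CNT(head, tail) > 0) {
		printf("consumed: %s\n", slots[tail]);
		tail = (tail + 1) & (RING_SIZE - 1);
	}

	printf("still charged: %u\n", RING_CNT(head, tail));
	return 0;
}
#endif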

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxrpc_connection_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxrpc_connection_lock);
		kfree(conn);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}
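
/*
 * A small pthread sketch (not part of this file) of the lock-then-unlock
 * barrier used in rxrpc_discard_prealloc() above: acquiring the lock
 * serialises us behind any section already holding it, and releasing it
 * immediately leaves nothing held.  The kernel code uses a BH-disabling
 * spinlock; the names below are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t incoming_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_in_progress_work(void)
{
	/* Cannot return until any critical section that was already
	 * inside incoming_lock has finished. */
	pthread_mutex_lock(&incoming_lock);
	pthread_mutex_unlock(&incoming_lock);
}
#endif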

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	return call;
}
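
/*
 * A minimal C11 sketch (not part of this file) of the acquire/release
 * pairing the backlog ring relies on: the producer fills a slot and then
 * releases the new head; the consumer acquires head before reading the
 * slot, so the slot contents are guaranteed visible.  smp_store_release()
 * and smp_load_acquire() above play the same roles.
 */
#if 0
#include <stdatomic.h>
#include <stddef.h>

static void *slot;
static atomic_uint head;

static void producer(void *p)
{
	slot = p;			/* fill the slot first... */
	atomic_store_explicit(&head, 1u, memory_order_release);
}

static void *consumer(void)
{
	if (atomic_load_explicit(&head, memory_order_acquire))
		return slot;		/* ...guaranteed to observe p */
	return NULL;
}
#endif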

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && service_id == rx->srx.srx_service)
		goto found_service;

	trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}
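
/*
 * A compact C11 sketch (not part of this file) of the reference discipline
 * described above: the backlog ring's ref is dropped once another holder
 * (socket queue, user ID tree or kernel service) is known to pin the
 * object.  Plain atomics stand in for the kernel's call refcounting.
 */
#if 0
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int usage;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add_explicit(&o->usage, 1, memory_order_relaxed);
}

static void obj_put(struct obj *o)
{
	/* Release on the drop so earlier writes are visible to the freer;
	 * acquire before freeing to see the other holders' writes. */
	if (atomic_fetch_sub_explicit(&o->usage, 1,
				      memory_order_release) == 1) {
		atomic_thread_fence(memory_order_acquire);
		free(o);
	}
}
#endif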

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		_leave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		_leave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
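
/*
 * A minimal sketch (not part of this file) of the descend/link/insert
 * rbtree pattern used twice above, keyed on a user-supplied ID.  The
 * struct and function names are hypothetical; the rb_entry(),
 * rb_link_node() and rb_insert_color() calls are the standard
 * linux/rbtree.h API.
 */
#if 0
#include <linux/rbtree.h>

struct id_node {
	struct rb_node node;
	unsigned long id;
};

static int id_tree_insert(struct rb_root *root, struct id_node *new)
{
	struct rb_node **pp = &root->rb_node, *parent = NULL;

	/* Descend to a NULL child pointer, remembering its parent. */
	while (*pp) {
		struct id_node *x = rb_entry(*pp, struct id_node, node);

		parent = *pp;
		if (new->id < x->id)
			pp = &(*pp)->rb_left;
		else if (new->id > x->id)
			pp = &(*pp)->rb_right;
		else
			return -EBADSLT;	/* ID already in use */
	}

	/* Link the new node in, then rebalance/recolour. */
	rb_link_node(&new->node, parent, pp);
	rb_insert_color(&new->node, root);
	return 0;
}
#endif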

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
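
/*
 * A hedged sketch (not part of this file) of how a kernel service might
 * keep its socket charged.  my_call_record, my_notify_rx and
 * my_attach_call are hypothetical, and the callback signatures are assumed
 * from the parameters used in this file; only rxrpc_kernel_charge_accept()
 * itself is real.
 */
#if 0
struct my_call_record {
	struct rxrpc_call *call;
};

static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
			 unsigned long user_call_ID)
{
	/* Wake up whatever processes this call's events. */
}

static void my_attach_call(struct rxrpc_call *call,
			   unsigned long user_call_ID)
{
	/* Called under the socket's call_lock with a ref held for us. */
	((struct my_call_record *)user_call_ID)->call = call;
}

static int my_charge_one(struct socket *sock)
{
	struct my_call_record *rec;
	int ret;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	ret = rxrpc_kernel_charge_accept(sock, my_notify_rx, my_attach_call,
					 (unsigned long)rec, GFP_KERNEL);
	if (ret < 0)
		kfree(rec);
	return ret;
}
#endif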