net/tls/tls_device_fallback.c
/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>

#include "tls.h"

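/* Link a fresh scatterlist entry to the unconsumed remainder of a
 * scatter walk: @sg covers the tail of the walk's current entry and
 * is chained to the rest of the walk's list, so the AEAD can pick up
 * exactly where the walk stopped.
 */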
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
        struct scatterlist *src = walk->sg;
        int diff = walk->offset - src->offset;

        sg_set_page(sg, sg_page(src),
                    src->length - diff, walk->offset);

        scatterwalk_crypto_chain(sg, sg_next(src), 2);
}

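/* Encrypt one TLS record from the @in walk into the @out walk. The
 * record header and explicit IV are copied through as-is; the AAD and
 * nonce are rebuilt from them and from @rcd_sn. *in_len is reduced by
 * the bytes consumed. A truncated final record is encrypted with a
 * garbage tag that the caller never transmits (see the comment below).
 */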
static int tls_enc_record(struct aead_request *aead_req,
                          struct crypto_aead *aead, char *aad,
                          char *iv, __be64 rcd_sn,
                          struct scatter_walk *in,
                          struct scatter_walk *out, int *in_len,
                          struct tls_prot_info *prot)
{
        unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
        const struct tls_cipher_desc *cipher_desc;
        struct scatterlist sg_in[3];
        struct scatterlist sg_out[3];
        unsigned int buf_size;
        u16 len;
        int rc;

        switch (prot->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
        case TLS_CIPHER_AES_GCM_256:
                break;
        default:
                return -EINVAL;
        }
        cipher_desc = get_cipher_desc(prot->cipher_type);

        buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
        len = min_t(int, *in_len, buf_size);

        scatterwalk_copychunks(buf, in, len, 0);
        scatterwalk_copychunks(buf, out, len, 1);

        *in_len -= len;
        if (!*in_len)
                return 0;

        scatterwalk_pagedone(in, 0, 1);
        scatterwalk_pagedone(out, 1, 1);

        len = buf[4] | (buf[3] << 8);
        len -= cipher_desc->iv;

        tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);

        memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);

        sg_init_table(sg_in, ARRAY_SIZE(sg_in));
        sg_init_table(sg_out, ARRAY_SIZE(sg_out));
        sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
        sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
        chain_to_walk(sg_in + 1, in);
        chain_to_walk(sg_out + 1, out);

        *in_len -= len;
        if (*in_len < 0) {
                *in_len += cipher_desc->tag;
                /* The input buffer doesn't contain the entire record;
                 * trim len accordingly. The resulting authentication tag
                 * will contain garbage, but we don't care, so we won't
                 * include any of it in the output skb.
                 * Note that we assume the output buffer length
                 * is larger than the input buffer length + tag size.
                 */
                if (*in_len < 0)
                        len += *in_len;

                *in_len = 0;
        }

        if (*in_len) {
                scatterwalk_copychunks(NULL, in, len, 2);
                scatterwalk_pagedone(in, 0, 1);
                scatterwalk_copychunks(NULL, out, len, 2);
                scatterwalk_pagedone(out, 1, 1);
        }

        len -= cipher_desc->tag;
        aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

        rc = crypto_aead_encrypt(aead_req);

        return rc;
}

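/* Helpers to allocate an AEAD request bound to @aead, with the
 * associated-data length fixed to TLS_AAD_SPACE_SIZE.
 */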
static void tls_init_aead_request(struct aead_request *aead_req,
                                  struct crypto_aead *aead)
{
        aead_request_set_tfm(aead_req, aead);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
                                                   gfp_t flags)
{
        unsigned int req_size = sizeof(struct aead_request) +
                crypto_aead_reqsize(aead);
        struct aead_request *aead_req;

        aead_req = kzalloc(req_size, flags);
        if (aead_req)
                tls_init_aead_request(aead_req, aead);
        return aead_req;
}

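/* Encrypt a stream of TLS records: walk @sg_in/@sg_out record by
 * record, bumping the record sequence number after each one, until
 * @len bytes are consumed or tls_enc_record() fails.
 */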
static int tls_enc_records(struct aead_request *aead_req,
                           struct crypto_aead *aead, struct scatterlist *sg_in,
                           struct scatterlist *sg_out, char *aad, char *iv,
                           u64 rcd_sn, int len, struct tls_prot_info *prot)
{
        struct scatter_walk out, in;
        int rc;

        scatterwalk_start(&in, sg_in);
        scatterwalk_start(&out, sg_out);

        do {
                rc = tls_enc_record(aead_req, aead, aad, iv,
                                    cpu_to_be64(rcd_sn), &in, &out, &len, prot);
                rcd_sn++;

        } while (rc == 0 && len);

        scatterwalk_done(&in, 0, 0);
        scatterwalk_done(&out, 1, 0);

        return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the IP addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
        struct tcphdr *th = tcp_hdr(skb);
        int datalen = skb->len - headln;
        const struct ipv6hdr *ipv6h;
        const struct iphdr *iph;

        /* We only changed the payload, so if we are using CHECKSUM_PARTIAL
         * we don't need to update anything.
         */
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
                return;

        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);

        if (skb->sk->sk_family == AF_INET6) {
                ipv6h = ipv6_hdr(skb);
                th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
                                             datalen, IPPROTO_TCP, 0);
        } else {
                iph = ip_hdr(skb);
                th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
                                               IPPROTO_TCP, 0);
        }
}

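/* Finish building the fallback skb: copy the headers, move socket
 * ownership from @skb to @nskb, fix up the TCP checksum and charge
 * any truesize difference to the socket's write allocation.
 */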
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
        struct sock *sk = skb->sk;
        int delta;

        skb_copy_header(nskb, skb);

        skb_put(nskb, skb->len);
        memcpy(nskb->data, skb->data, headln);

        nskb->destructor = skb->destructor;
        nskb->sk = sk;
        skb->destructor = NULL;
        skb->sk = NULL;

        update_chksum(nskb, headln);

        /* sock_efree means the skb must have gone through skb_orphan_partial() */
        if (nskb->destructor == sock_efree)
                return;

        delta = nskb->truesize - skb->truesize;
        if (likely(delta < 0))
                WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
        else if (delta)
                refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed, so make sure we don't use anything freed during
 * tls_sk_proto_close here.
 */

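/* Build the source scatterlist for re-encryption: the fragments of
 * the TLS record that precede this packet's TCP sequence number (the
 * "sync" region that must be re-encrypted for the tag to come out
 * right), followed by the packet's own payload. Each record fragment
 * takes a page reference; the caller drops them using *resync_sgs.
 */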
static int fill_sg_in(struct scatterlist *sg_in,
                      struct sk_buff *skb,
                      struct tls_offload_context_tx *ctx,
                      u64 *rcd_sn,
                      s32 *sync_size,
                      int *resync_sgs)
{
        int tcp_payload_offset = skb_tcp_all_headers(skb);
        int payload_len = skb->len - tcp_payload_offset;
        u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
        struct tls_record_info *record;
        unsigned long flags;
        int remaining;
        int i;

        spin_lock_irqsave(&ctx->lock, flags);
        record = tls_get_record(ctx, tcp_seq, rcd_sn);
        if (!record) {
                spin_unlock_irqrestore(&ctx->lock, flags);
                return -EINVAL;
        }

        *sync_size = tcp_seq - tls_record_start_seq(record);
        if (*sync_size < 0) {
                int is_start_marker = tls_record_is_start_marker(record);

                spin_unlock_irqrestore(&ctx->lock, flags);
                /* This should only occur if the relevant record was
                 * already acked. In that case it should be ok
                 * to drop the packet and avoid retransmission.
                 *
                 * There is a corner case where the packet contains
                 * both an acked and a non-acked record.
                 * We currently don't handle that case and rely
                 * on TCP to retransmit a packet that doesn't contain
                 * already acked payload.
                 */
                if (!is_start_marker)
                        *sync_size = 0;
                return -EINVAL;
        }

        remaining = *sync_size;
        for (i = 0; remaining > 0; i++) {
                skb_frag_t *frag = &record->frags[i];

                __skb_frag_ref(frag);
                sg_set_page(sg_in + i, skb_frag_page(frag),
                            skb_frag_size(frag), skb_frag_off(frag));

                remaining -= skb_frag_size(frag);

                if (remaining < 0)
                        sg_in[i].length += remaining;
        }
        *resync_sgs = i;

        spin_unlock_irqrestore(&ctx->lock, flags);
        if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
                return -EINVAL;

        return 0;
}

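/* Build the destination scatterlist: scratch space that swallows the
 * re-encrypted sync region, the payload area of the new skb, and
 * trailing room for an authentication tag that is never transmitted.
 */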
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
                        struct tls_context *tls_ctx,
                        struct sk_buff *nskb,
                        int tcp_payload_offset,
                        int payload_len,
                        int sync_size,
                        void *dummy_buf)
{
        const struct tls_cipher_desc *cipher_desc =
                get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);

        sg_set_buf(&sg_out[0], dummy_buf, sync_size);
        sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
        /* Add room for authentication tag produced by crypto */
        dummy_buf += sync_size;
        sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
}

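/* Allocate a fresh skb and re-encrypt the affected records into it.
 * One scratch buffer holds the nonce (salt + explicit IV), the AAD
 * and the sync/tag scratch space. Returns the encrypted skb, which
 * takes over the original's socket accounting, or NULL on failure.
 */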
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
                                   struct scatterlist sg_out[3],
                                   struct scatterlist *sg_in,
                                   struct sk_buff *skb,
                                   s32 sync_size, u64 rcd_sn)
{
        struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int tcp_payload_offset = skb_tcp_all_headers(skb);
        int payload_len = skb->len - tcp_payload_offset;
        const struct tls_cipher_desc *cipher_desc;
        void *buf, *iv, *aad, *dummy_buf, *salt;
        struct aead_request *aead_req;
        struct sk_buff *nskb = NULL;
        int buf_len;

        aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
        if (!aead_req)
                return NULL;

        switch (tls_ctx->crypto_send.info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                salt = tls_ctx->crypto_send.aes_gcm_128.salt;
                break;
        case TLS_CIPHER_AES_GCM_256:
                salt = tls_ctx->crypto_send.aes_gcm_256.salt;
                break;
        default:
                goto free_req;
        }
        cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
        buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
                  sync_size + cipher_desc->tag;
        buf = kmalloc(buf_len, GFP_ATOMIC);
        if (!buf)
                goto free_req;

        iv = buf;
        memcpy(iv, salt, cipher_desc->salt);
        aad = buf + cipher_desc->salt + cipher_desc->iv;
        dummy_buf = aad + TLS_AAD_SPACE_SIZE;

        nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
        if (!nskb)
                goto free_buf;

        skb_reserve(nskb, skb_headroom(skb));

        fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
                    payload_len, sync_size, dummy_buf);

        if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
                            rcd_sn, sync_size + payload_len,
                            &tls_ctx->prot_info) < 0)
                goto free_nskb;

        complete_skb(nskb, skb, tcp_payload_offset);

        /* validate_xmit_skb_list assumes that if the skb wasn't segmented,
         * nskb->prev will point to the skb itself
         */
        nskb->prev = nskb;

free_buf:
        kfree(buf);
free_req:
        kfree(aead_req);
        return nskb;
free_nskb:
        kfree_skb(nskb);
        nskb = NULL;
        goto free_buf;
}

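/* Software fallback: a packet that can't go through the TLS offload
 * device (e.g. a retransmission routed elsewhere) has the records it
 * overlaps re-encrypted in software. The original skb is consumed
 * either way; the encrypted copy (or NULL) is returned.
 */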
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
        int tcp_payload_offset = skb_tcp_all_headers(skb);
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int payload_len = skb->len - tcp_payload_offset;
        struct scatterlist *sg_in, sg_out[3];
        struct sk_buff *nskb = NULL;
        int sg_in_max_elements;
        int resync_sgs = 0;
        s32 sync_size = 0;
        u64 rcd_sn;

        /* Worst case is:
         * MAX_SKB_FRAGS in tls_record_info, plus
         * MAX_SKB_FRAGS + 1 for the SKB head and frags.
         */
        sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

        if (!payload_len)
                return skb;

        sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
        if (!sg_in)
                goto free_orig;

        sg_init_table(sg_in, sg_in_max_elements);
        sg_init_table(sg_out, ARRAY_SIZE(sg_out));

        if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
                /* Bypass packets sent before the kernel TLS socket option
                 * was set.
                 */
                if (sync_size < 0 && payload_len <= -sync_size)
                        nskb = skb_get(skb);
                goto put_sg;
        }

        nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
        while (resync_sgs)
                put_page(sg_page(&sg_in[--resync_sgs]));
        kfree(sg_in);
free_orig:
        if (nskb)
                consume_skb(skb);
        else
                kfree_skb(skb);
        return nskb;
}

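/* Transmit hook: pass the packet through untouched if it egresses via
 * the device holding the TLS offload state (or via a bond master),
 * otherwise encrypt it with the software fallback.
 */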
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
                                      struct net_device *dev,
                                      struct sk_buff *skb)
{
        if (dev == rcu_dereference_bh(tls_get_ctx(sk)->netdev) ||
            netif_is_bond_master(dev))
                return skb;

        return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);

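/* Unconditional variant of the transmit hook: always encrypt via the
 * software fallback, regardless of the egress device.
 */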
struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
                                         struct net_device *dev,
                                         struct sk_buff *skb)
{
        return tls_sw_fallback(sk, skb);
}

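/* Convenience wrapper for drivers: software-encrypt @skb on behalf of
 * its owning socket.
 */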
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
{
        return tls_sw_fallback(skb->sk, skb);
}
EXPORT_SYMBOL_GPL(tls_encrypt_skb);

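/* Set up the software-fallback AEAD transform for a TX offload
 * context. The cipher must be marked offloadable; the key and the
 * authentication tag size come from @crypto_info.
 */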
int tls_sw_fallback_init(struct sock *sk,
                         struct tls_offload_context_tx *offload_ctx,
                         struct tls_crypto_info *crypto_info)
{
        const struct tls_cipher_desc *cipher_desc;
        int rc;

        cipher_desc = get_cipher_desc(crypto_info->cipher_type);
        if (!cipher_desc || !cipher_desc->offloadable)
                return -EINVAL;

        offload_ctx->aead_send =
            crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(offload_ctx->aead_send)) {
                rc = PTR_ERR(offload_ctx->aead_send);
                pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
                offload_ctx->aead_send = NULL;
                goto err_out;
        }

        rc = crypto_aead_setkey(offload_ctx->aead_send,
                                crypto_info_key(crypto_info, cipher_desc),
                                cipher_desc->key);
        if (rc)
                goto free_aead;

        rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
        if (rc)
                goto free_aead;

        return 0;
free_aead:
        crypto_free_aead(offload_ctx->aead_send);
err_out:
        return rc;
}